Coverage for nova/compute/utils.py: 94%
565 statements
coverage.py v7.6.12, created at 2025-04-17 15:08 +0000
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute-related Utilities and helpers."""

import contextlib
import functools
import inspect
import itertools
import math
import socket
import traceback

from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
import psutil

from nova.accelerator import cyborg
from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova import notifications
from nova.notifications.objects import aggregate as aggregate_notification
from nova.notifications.objects import base as notification_base
from nova.notifications.objects import compute_task as task_notification
from nova.notifications.objects import exception as notification_exception
from nova.notifications.objects import flavor as flavor_notification
from nova.notifications.objects import instance as instance_notification
from nova.notifications.objects import keypair as keypair_notification
from nova.notifications.objects import libvirt as libvirt_notification
from nova.notifications.objects import metrics as metrics_notification
from nova.notifications.objects import request_spec as reqspec_notification
from nova.notifications.objects import scheduler as scheduler_notification
from nova.notifications.objects import server_group as sg_notification
from nova.notifications.objects import volume as volume_notification
from nova import objects
from nova.objects import fields
from nova import rpc
from nova import safe_utils
from nova import utils
CONF = nova.conf.CONF
LOG = log.getLogger(__name__)

# These properties are specific to a particular image by design. It
# does not make sense for them to be inherited by server snapshots.
# This list is distinct from the configuration option of the same
# (lowercase) name.
NON_INHERITABLE_IMAGE_PROPERTIES = frozenset([
    'cinder_encryption_key_id',
    'cinder_encryption_key_deletion_policy',
    'img_signature',
    'img_signature_hash_method',
    'img_signature_key_type',
    'img_signature_certificate_uuid'])

# Properties starting with these namespaces are reserved for internal
# use by other services. It does not make sense (and may cause a request
# to fail) if we include them in a snapshot.
NON_INHERITABLE_IMAGE_NAMESPACES = frozenset([
    'os_glance',
])


def exception_to_dict(fault, message=None):
    """Converts exceptions to a dict for use in notifications.

    :param fault: Exception that occurred
    :param message: Optional fault message, otherwise the message is derived
        from the fault itself.
    :returns: dict with the following items:

        - exception: the fault itself
        - message: one of (in priority order):

            - the provided message to this method
            - a formatted NovaException message
            - the fault class name
        - code: integer code for the fault (defaults to 500)
    """
    # TODO(johngarbutt) move to nova/exception.py to share with wrap_exception
    code = 500
    if hasattr(fault, "kwargs"):
        code = fault.kwargs.get('code', 500)

    # get the message from the exception that was thrown
    # if that does not exist, use the name of the exception class itself
    try:
        if not message:
            message = fault.format_message()
    # These exception handlers are broad so we don't fail to log the fault
    # just because there is an unexpected error retrieving the message
    except Exception:
        # In this case either we have a NovaException which failed to format
        # the message or we have a non-nova exception which could contain
        # sensitive details. Since we're not sure, be safe and set the message
        # to the exception class name. Note that we don't guard on
        # context.is_admin here because the message is always shown in the
        # API, even to non-admin users (e.g. NoValidHost) but only the
        # traceback details are shown to users with the admin role. Checking
        # for admin context here is also not helpful because admins can
        # perform operations on a tenant user's server (migrations, reboot,
        # etc) and service startup and periodic tasks could take actions on a
        # server and those use an admin context.
        message = fault.__class__.__name__
    # NOTE(dripton) The message field in the database is limited to 255 chars.
    # MySQL silently truncates overly long messages, but PostgreSQL throws an
    # error if we don't truncate it.
    u_message = utils.safe_truncate(message, 255)
    fault_dict = dict(exception=fault)
    fault_dict["message"] = u_message
    fault_dict["code"] = code
    return fault_dict
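
# A hedged usage sketch (hypothetical fault; the resulting dict always has
# exactly the 'exception', 'message' and 'code' keys described above):
#
#   >>> fault = exception.InvalidDevicePath(path='/dev/xxx')
#   >>> sorted(exception_to_dict(fault))
#   ['code', 'exception', 'message']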


def _get_fault_details(exc_info, error_code):
    details = ''
    # TODO(mriedem): Why do we only include the details if the code is 500?
    # Though for non-nova exceptions the code will probably be 500.
    if exc_info and error_code == 500:
        # We get the full exception details including the value since
        # the fault message may not contain that information for non-nova
        # exceptions (see exception_to_dict).
        details = ''.join(traceback.format_exception(
            exc_info[0], exc_info[1], exc_info[2]))
    return str(details)


def add_instance_fault_from_exc(context, instance, fault, exc_info=None,
                                fault_message=None):
    """Adds the specified fault to the database."""
    fault_obj = objects.InstanceFault(context=context)
    fault_obj.host = CONF.host
    fault_obj.instance_uuid = instance.uuid
    fault_obj.update(exception_to_dict(fault, message=fault_message))
    code = fault_obj.code
    fault_obj.details = _get_fault_details(exc_info, code)
    fault_obj.create()


def get_device_name_for_instance(instance, bdms, device):
    """Validates (or generates) a device name for instance.

    This method is a wrapper for get_next_device_name that gets the list
    of used devices and the root device from a block device mapping.

    :raises TooManyDiskDevices: if the maximum allowed devices to attach to a
        single instance is exceeded.
    """
    mappings = block_device.instance_block_mapping(instance, bdms)
    return get_next_device_name(instance, mappings.values(),
                                mappings['root'], device)


def default_device_names_for_instance(instance, root_device_name,
                                      *block_device_lists):
    """Generate missing device names for an instance.

    :raises TooManyDiskDevices: if the maximum allowed devices to attach to a
        single instance is exceeded.
    """
    dev_list = [bdm.device_name
                for bdm in itertools.chain(*block_device_lists)
                if bdm.device_name]
    if root_device_name not in dev_list:
        dev_list.append(root_device_name)

    for bdm in itertools.chain(*block_device_lists):
        dev = bdm.device_name
        if not dev:
            dev = get_next_device_name(instance, dev_list,
                                       root_device_name)
            bdm.device_name = dev
            bdm.save()
            dev_list.append(dev)


def check_max_disk_devices_to_attach(num_devices):
    maximum = CONF.compute.max_disk_devices_to_attach
    if maximum < 0:
        return

    if num_devices > maximum:
        raise exception.TooManyDiskDevices(maximum=maximum)


def get_next_device_name(instance, device_name_list,
                         root_device_name=None, device=None):
    """Validates (or generates) a device name for instance.

    If device is not set, it will generate a unique device appropriate
    for the instance. It uses the root_device_name (if provided) and
    the list of used devices to find valid device names. If the device
    name is valid but applicable to a different backend (for example
    /dev/vdc is specified but the backend uses /dev/xvdc), the device
    name will be converted to the appropriate format.

    :raises TooManyDiskDevices: if the maximum allowed devices to attach to a
        single instance is exceeded.
    """
    req_prefix = None
    req_letter = None

    if device:
        try:
            req_prefix, req_letter = block_device.match_device(device)
        except (TypeError, AttributeError, ValueError):
            raise exception.InvalidDevicePath(path=device)

    if not root_device_name:
        root_device_name = block_device.DEFAULT_ROOT_DEV_NAME

    try:
        prefix = block_device.match_device(
            block_device.prepend_dev(root_device_name))[0]
    except (TypeError, AttributeError, ValueError):
        raise exception.InvalidDevicePath(path=root_device_name)

    if req_prefix != prefix:
        LOG.debug("Using %(prefix)s instead of %(req_prefix)s",
                  {'prefix': prefix, 'req_prefix': req_prefix})

    used_letters = set()
    for device_path in device_name_list:
        letter = block_device.get_device_letter(device_path)
        used_letters.add(letter)

    check_max_disk_devices_to_attach(len(used_letters) + 1)

    if not req_letter:
        req_letter = _get_unused_letter(used_letters)

    if req_letter in used_letters:
        raise exception.DevicePathInUse(path=device)

    return prefix + req_letter
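
# A hedged usage sketch (illustrative values only; the prefix of the result
# is derived from root_device_name, so '/dev/vda' yields '/dev/vd*' names):
#
#   >>> get_next_device_name(instance, ['/dev/vda'], '/dev/vda')
#   '/dev/vdb'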


def get_root_bdm(context, instance, bdms=None):
    if bdms is None:
        if isinstance(instance, objects.Instance):
            uuid = instance.uuid
        else:
            uuid = instance['uuid']
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, uuid)

    return bdms.root_bdm()


def is_volume_backed_instance(context, instance, bdms=None):
    root_bdm = get_root_bdm(context, instance, bdms)
    if root_bdm is not None:
        return root_bdm.is_volume
    # in case we hit a very old instance without root bdm, we _assume_ that
    # instance is backed by a volume, if and only if image_ref is not set
    if isinstance(instance, objects.Instance):
        return not instance.image_ref

    return not instance['image_ref']


def heal_reqspec_is_bfv(ctxt, request_spec, instance):
    """Calculates the is_bfv flag for a RequestSpec created before Rocky.

    Starting in Rocky, new instances have their RequestSpec created with
    the "is_bfv" flag to indicate if they are volume-backed which is used
    by the scheduler when determining root disk resource allocations.

    RequestSpecs created before Rocky will not have the is_bfv flag set
    so we need to calculate it here and update the RequestSpec.

    :param ctxt: nova.context.RequestContext auth context
    :param request_spec: nova.objects.RequestSpec used for scheduling
    :param instance: nova.objects.Instance being scheduled
    """
    if 'is_bfv' in request_spec:
        return
    # Determine if this is a volume-backed instance and set the field
    # in the request spec accordingly.
    request_spec.is_bfv = is_volume_backed_instance(ctxt, instance)
    request_spec.save()


def convert_mb_to_ceil_gb(mb_value):
    gb_int = 0
    if mb_value:
        gb_float = mb_value / 1024.0
        # ensure we reserve/allocate enough space by rounding up to nearest GB
        gb_int = int(math.ceil(gb_float))
    return gb_int
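
# The rounding behaviour, as a small worked example:
#
#   >>> convert_mb_to_ceil_gb(0)
#   0
#   >>> convert_mb_to_ceil_gb(1024)
#   1
#   >>> convert_mb_to_ceil_gb(1025)
#   2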


def _get_unused_letter(used_letters):
    # Return the first unused device letter
    index = 0
    while True:
        letter = block_device.generate_device_letter(index)
        if letter not in used_letters:
            return letter
        index += 1
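
# A small sketch of the allocation (assuming generate_device_letter yields
# 'a', 'b', 'c', ... for increasing indexes):
#
#   >>> _get_unused_letter({'a', 'b'})
#   'c'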


def get_value_from_system_metadata(instance, key, type, default):
    """Get a value of a specified type from instance system metadata.

    :param instance: The instance object
    :param key: The name of the property to get
    :param type: The python type the value is to be returned as
    :param default: The value to return if key is not set or not the right
        type
    """
    value = instance.system_metadata.get(key, default)
    try:
        return type(value)
    except ValueError:
        LOG.warning("Metadata value %(value)s for %(key)s is not of "
                    "type %(type)s. Using default value %(default)s.",
                    {'value': value, 'key': key, 'type': type,
                     'default': default}, instance=instance)
        return default
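
# A hedged usage sketch (hypothetical instance whose system_metadata
# contains {'image_min_ram': '512'}):
#
#   >>> get_value_from_system_metadata(instance, 'image_min_ram', int, 0)
#   512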


def notify_usage_exists(notifier, context, instance_ref, host,
                        current_period=False, ignore_missing_network_data=True,
                        system_metadata=None, extra_usage_info=None):
    """Generates 'exists' unversioned legacy and transformed notification
    for an instance for usage auditing purposes.

    :param notifier: a messaging.Notifier
    :param context: request context for the current operation
    :param instance_ref: nova.objects.Instance object from which to report
        usage
    :param host: the host emitting the notification
    :param current_period: if True, this will generate a usage for the
        current usage period; if False, this will generate a usage for the
        previous audit period.
    :param ignore_missing_network_data: if True, log any exceptions generated
        while getting network info; if False, raise the exception.
    :param system_metadata: system_metadata override for the instance. If
        None, the instance_ref.system_metadata will be used.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification if not None.
    """
    audit_start, audit_end = notifications.audit_period_bounds(current_period)

    if system_metadata is None:
        system_metadata = utils.instance_sys_meta(instance_ref)

    # add image metadata to the notification:
    image_meta = notifications.image_meta(system_metadata)

    extra_info = dict(audit_period_beginning=str(audit_start),
                      audit_period_ending=str(audit_end),
                      image_meta=image_meta)

    if extra_usage_info:
        extra_info.update(extra_usage_info)

    notify_about_instance_usage(notifier, context, instance_ref, 'exists',
                                extra_usage_info=extra_info)

    audit_period = instance_notification.AuditPeriodPayload(
        audit_period_beginning=audit_start,
        audit_period_ending=audit_end,
    )

    payload = instance_notification.InstanceExistsPayload(
        context=context,
        instance=instance_ref,
        audit_period=audit_period,
    )

    notification = instance_notification.InstanceExistsNotification(
        context=context,
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE,
        ),
        event_type=notification_base.EventType(
            object='instance',
            action=fields.NotificationAction.EXISTS,
        ),
        payload=payload,
    )
    notification.emit(context)


def notify_about_instance_usage(notifier, context, instance, event_suffix,
                                network_info=None, extra_usage_info=None,
                                fault=None, best_effort=False):
    """Send an unversioned legacy notification about an instance.

    All new notifications should use notify_about_instance_action which sends
    a versioned notification.

    :param notifier: a messaging.Notifier
    :param event_suffix: Event type like "delete.start" or "exists"
    :param network_info: Networking information, if provided.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification.
    """
    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = notifications.info_from_instance(
        context, instance, network_info, populate_image_ref_url=True,
        **extra_usage_info)

    if fault:
        # NOTE(johngarbutt) mirrors the format in wrap_exception
        fault_payload = exception_to_dict(fault)
        LOG.debug(fault_payload["message"], instance=instance)
        usage_info.update(fault_payload)

    if event_suffix.endswith("error"):
        method = notifier.error
    else:
        method = notifier.info

    try:
        method(context, 'compute.instance.%s' % event_suffix, usage_info)
    except Exception as e:
        if best_effort:
            LOG.error('Exception during notification sending: %s. '
                      'Attempting to proceed with normal operation.', e)
        else:
            raise e


def _get_fault_and_priority_from_exception(exception: Exception):
    fault = None
    priority = fields.NotificationPriority.INFO

    if not exception:
        return fault, priority

    fault = notification_exception.ExceptionPayload.from_exception(exception)
    priority = fields.NotificationPriority.ERROR

    return fault, priority


@rpc.if_notifications_enabled
def notify_about_instance_action(context, instance, host, action, phase=None,
                                 source=fields.NotificationSource.COMPUTE,
                                 exception=None, bdms=None, best_effort=False):
    """Send versioned notification about the action made on the instance

    :param instance: the instance which the action performed on
    :param host: the host emitting the notification
    :param action: the name of the action
    :param phase: the phase of the action
    :param source: the source of the notification
    :param exception: the thrown exception (used in error notifications)
    :param bdms: BlockDeviceMappingList object for the instance. If it is not
        provided then we will load it from the db if so configured
    """
    fault, priority = _get_fault_and_priority_from_exception(exception)
    payload = instance_notification.InstanceActionPayload(
        context=context,
        instance=instance,
        fault=fault,
        bdms=bdms)
    notification = instance_notification.InstanceActionNotification(
        context=context,
        priority=priority,
        publisher=notification_base.NotificationPublisher(
            host=host, source=source),
        event_type=notification_base.EventType(
            object='instance',
            action=action,
            phase=phase),
        payload=payload)
    try:
        notification.emit(context)
    except Exception as e:
        if best_effort:
            LOG.error('Exception during notification sending: %s. '
                      'Attempting to proceed with normal operation.', e)
        else:
            raise e


@rpc.if_notifications_enabled
def notify_about_instance_create(context, instance, host, phase=None,
                                 exception=None, bdms=None):
    """Send versioned notification about instance creation

    :param context: the request context
    :param instance: the instance being created
    :param host: the host emitting the notification
    :param phase: the phase of the creation
    :param exception: the thrown exception (used in error notifications)
    :param bdms: BlockDeviceMappingList object for the instance. If it is not
        provided then we will load it from the db if so configured
    """
    fault, priority = _get_fault_and_priority_from_exception(exception)
    payload = instance_notification.InstanceCreatePayload(
        context=context,
        instance=instance,
        fault=fault,
        bdms=bdms)
    notification = instance_notification.InstanceCreateNotification(
        context=context,
        priority=priority,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='instance',
            action=fields.NotificationAction.CREATE,
            phase=phase),
        payload=payload)
    notification.emit(context)


@rpc.if_notifications_enabled
def notify_about_scheduler_action(context, request_spec, action, phase=None,
                                  source=fields.NotificationSource.SCHEDULER):
    """Send versioned notification about the action made by the scheduler

    :param context: the RequestContext object
    :param request_spec: the RequestSpec object
    :param action: the name of the action
    :param phase: the phase of the action
    :param source: the source of the notification
    """
    payload = reqspec_notification.RequestSpecPayload(
        request_spec=request_spec)
    notification = scheduler_notification.SelectDestinationsNotification(
        context=context,
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=source),
        event_type=notification_base.EventType(
            object='scheduler',
            action=action,
            phase=phase),
        payload=payload)
    notification.emit(context)


@rpc.if_notifications_enabled
def notify_about_volume_attach_detach(context, instance, host, action, phase,
                                      volume_id=None, exception=None):
    """Send versioned notification about the action made on the instance

    :param instance: the instance which the action performed on
    :param host: the host emitting the notification
    :param action: the name of the action
    :param phase: the phase of the action
    :param volume_id: the ID of the volume being attached or detached
    :param exception: the thrown exception (used in error notifications)
    """
    fault, priority = _get_fault_and_priority_from_exception(exception)
    payload = instance_notification.InstanceActionVolumePayload(
        context=context,
        instance=instance,
        fault=fault,
        volume_id=volume_id)
    notification = instance_notification.InstanceActionVolumeNotification(
        context=context,
        priority=priority,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='instance',
            action=action,
            phase=phase),
        payload=payload)
    notification.emit(context)


@rpc.if_notifications_enabled
def notify_about_share_attach_detach(context, instance, host, action, phase,
                                     share_id=None, exception=None):
    """Send versioned notification about the action made on the instance

    :param instance: the instance which the action performed on
    :param host: the host emitting the notification
    :param action: the name of the action
    :param phase: the phase of the action
    :param share_id: the ID of the share being attached or detached
    :param exception: the thrown exception (used in error notifications)
    """
    fault, priority = _get_fault_and_priority_from_exception(exception)
    payload = instance_notification.InstanceActionSharePayload(
        context=context,
        instance=instance,
        fault=fault,
        share_id=share_id
    )
    notification = instance_notification.InstanceActionShareNotification(
        context=context,
        priority=priority,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.API),
        event_type=notification_base.EventType(
            object='instance',
            action=action,
            phase=phase),
        payload=payload)
    notification.emit(context)


@rpc.if_notifications_enabled
def notify_about_instance_rescue_action(context, instance, host,
                                        rescue_image_ref, phase=None,
                                        exception=None):
    """Send versioned notification about the action made on the instance

    :param instance: the instance which the action performed on
    :param host: the host emitting the notification
    :param rescue_image_ref: the rescue image ref
    :param phase: the phase of the action
    :param exception: the thrown exception (used in error notifications)
    """
    fault, priority = _get_fault_and_priority_from_exception(exception)
    payload = instance_notification.InstanceActionRescuePayload(
        context=context,
        instance=instance,
        fault=fault,
        rescue_image_ref=rescue_image_ref)

    notification = instance_notification.InstanceActionRescueNotification(
        context=context,
        priority=priority,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='instance',
            action=fields.NotificationAction.RESCUE,
            phase=phase),
        payload=payload)
    notification.emit(context)


@rpc.if_notifications_enabled
def notify_about_keypair_action(context, keypair, action, phase):
    """Send versioned notification about the keypair action on the instance

    :param context: the request context
    :param keypair: the keypair which the action performed on
    :param action: the name of the action
    :param phase: the phase of the action
    """
    payload = keypair_notification.KeypairPayload(keypair=keypair)
    notification = keypair_notification.KeypairNotification(
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=fields.NotificationSource.API),
        event_type=notification_base.EventType(
            object='keypair',
            action=action,
            phase=phase),
        payload=payload)
    notification.emit(context)


@rpc.if_notifications_enabled
def notify_about_volume_swap(context, instance, host, phase,
                             old_volume_id, new_volume_id, exception=None):
    """Send versioned notification about the volume swap action
    on the instance

    :param context: the request context
    :param instance: the instance which the action performed on
    :param host: the host emitting the notification
    :param phase: the phase of the action
    :param old_volume_id: the ID of the volume that is copied from and
        detached
    :param new_volume_id: the ID of the volume that is copied to and attached
    :param exception: an exception
    """
    fault, priority = _get_fault_and_priority_from_exception(exception)
    payload = instance_notification.InstanceActionVolumeSwapPayload(
        context=context,
        instance=instance,
        fault=fault,
        old_volume_id=old_volume_id,
        new_volume_id=new_volume_id)

    instance_notification.InstanceActionVolumeSwapNotification(
        context=context,
        priority=priority,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='instance',
            action=fields.NotificationAction.VOLUME_SWAP,
            phase=phase),
        payload=payload).emit(context)


@rpc.if_notifications_enabled
def notify_about_instance_snapshot(context, instance, host, phase,
                                   snapshot_image_id):
    """Send versioned notification about the snapshot action executed on the
    instance

    :param context: the request context
    :param instance: the instance from which a snapshot image is being created
    :param host: the host emitting the notification
    :param phase: the phase of the action
    :param snapshot_image_id: the ID of the snapshot
    """
    payload = instance_notification.InstanceActionSnapshotPayload(
        context=context,
        instance=instance,
        fault=None,
        snapshot_image_id=snapshot_image_id)

    instance_notification.InstanceActionSnapshotNotification(
        context=context,
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='instance',
            action=fields.NotificationAction.SNAPSHOT,
            phase=phase),
        payload=payload).emit(context)


@rpc.if_notifications_enabled
def notify_about_resize_prep_instance(context, instance, host, phase,
                                      new_flavor):
    """Send versioned notification about the instance resize action
    on the instance

    :param context: the request context
    :param instance: the instance which the resize action performed on
    :param host: the host emitting the notification
    :param phase: the phase of the action
    :param new_flavor: new flavor
    """
    payload = instance_notification.InstanceActionResizePrepPayload(
        context=context,
        instance=instance,
        fault=None,
        new_flavor=flavor_notification.FlavorPayload(flavor=new_flavor))

    instance_notification.InstanceActionResizePrepNotification(
        context=context,
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='instance',
            action=fields.NotificationAction.RESIZE_PREP,
            phase=phase),
        payload=payload).emit(context)


def notify_about_server_group_update(context, event_suffix, sg_payload):
    """Send a notification about server group update.

    :param event_suffix: Event type like "create.start" or "create.end"
    :param sg_payload: payload for server group update
    """
    notifier = rpc.get_notifier(service='servergroup')

    notifier.info(context, 'servergroup.%s' % event_suffix, sg_payload)


def notify_about_aggregate_update(context, event_suffix, aggregate_payload):
    """Send a notification about aggregate update.

    :param event_suffix: Event type like "create.start" or "create.end"
    :param aggregate_payload: payload for aggregate update
    """
    aggregate_identifier = aggregate_payload.get('aggregate_id', None)
    if not aggregate_identifier:
        aggregate_identifier = aggregate_payload.get('name', None)
        if not aggregate_identifier:
            LOG.debug("No aggregate id or name specified for this "
                      "notification and it will be ignored")
            return

    notifier = rpc.get_notifier(service='aggregate',
                                host=aggregate_identifier)

    notifier.info(context, 'aggregate.%s' % event_suffix, aggregate_payload)


@rpc.if_notifications_enabled
def notify_about_aggregate_action(context, aggregate, action, phase):
    payload = aggregate_notification.AggregatePayload(aggregate)
    notification = aggregate_notification.AggregateNotification(
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=fields.NotificationSource.API),
        event_type=notification_base.EventType(
            object='aggregate',
            action=action,
            phase=phase),
        payload=payload)
    notification.emit(context)


@rpc.if_notifications_enabled
def notify_about_aggregate_cache(context, aggregate, host, image_status,
                                 index, total):
    """Send a notification about aggregate cache_images progress.

    :param context: The RequestContext
    :param aggregate: The target aggregate
    :param host: The host within the aggregate for which to report status
    :param image_status: The result from the compute host, which is a dict
        of {image_id: status}
    :param index: An integer indicating progress toward completion, between
        1 and $total
    :param total: The total number of hosts being processed in this operation,
        to bound $index
    """
    success_statuses = ('cached', 'existing')
    payload = aggregate_notification.AggregateCachePayload(aggregate,
                                                           host,
                                                           index,
                                                           total)
    payload.images_cached = []
    payload.images_failed = []
    for img, status in image_status.items():
        if status in success_statuses:
            payload.images_cached.append(img)
        else:
            payload.images_failed.append(img)
    notification = aggregate_notification.AggregateCacheNotification(
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=fields.NotificationSource.CONDUCTOR),
        event_type=notification_base.EventType(
            object='aggregate',
            action=fields.NotificationAction.IMAGE_CACHE,
            phase=fields.NotificationPhase.PROGRESS),
        payload=payload)
    notification.emit(context)


def notify_about_host_update(context, event_suffix, host_payload):
    """Send a notification about host update.

    :param event_suffix: Event type like "create.start" or "create.end"
    :param host_payload: payload for host update. It is a dict and there
        should be at least the 'host_name' key in this dict.
    """
    host_identifier = host_payload.get('host_name')
    if not host_identifier:
        LOG.warning("No host name specified for the notification of "
                    "HostAPI.%s and it will be ignored", event_suffix)
        return

    notifier = rpc.get_notifier(service='api', host=host_identifier)

    notifier.info(context, 'HostAPI.%s' % event_suffix, host_payload)


@rpc.if_notifications_enabled
def notify_about_server_group_action(context, group, action):
    payload = sg_notification.ServerGroupPayload(group)
    notification = sg_notification.ServerGroupNotification(
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=fields.NotificationSource.API),
        event_type=notification_base.EventType(
            object='server_group',
            action=action),
        payload=payload)
    notification.emit(context)


@rpc.if_notifications_enabled
def notify_about_server_group_add_member(context, group_id):
    group = objects.InstanceGroup.get_by_uuid(context, group_id)
    payload = sg_notification.ServerGroupPayload(group)
    notification = sg_notification.ServerGroupNotification(
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=fields.NotificationSource.API),
        event_type=notification_base.EventType(
            object='server_group',
            action=fields.NotificationAction.ADD_MEMBER),
        payload=payload)
    notification.emit(context)


@rpc.if_notifications_enabled
def notify_about_instance_rebuild(context, instance, host,
                                  action=fields.NotificationAction.REBUILD,
                                  phase=None,
                                  source=fields.NotificationSource.COMPUTE,
                                  exception=None, bdms=None):
    """Send versioned notification about instance rebuild

    :param instance: the instance which the action performed on
    :param host: the host emitting the notification
    :param action: the name of the action
    :param phase: the phase of the action
    :param source: the source of the notification
    :param exception: the thrown exception (used in error notifications)
    :param bdms: BlockDeviceMappingList object for the instance. If it is not
        provided then we will load it from the db if so configured
    """
    fault, priority = _get_fault_and_priority_from_exception(exception)
    payload = instance_notification.InstanceActionRebuildPayload(
        context=context,
        instance=instance,
        fault=fault,
        bdms=bdms)
    notification = instance_notification.InstanceActionRebuildNotification(
        context=context,
        priority=priority,
        publisher=notification_base.NotificationPublisher(
            host=host, source=source),
        event_type=notification_base.EventType(
            object='instance',
            action=action,
            phase=phase),
        payload=payload)
    notification.emit(context)


@rpc.if_notifications_enabled
def notify_about_metrics_update(context, host, host_ip, nodename,
                                monitor_metric_list):
    """Send versioned notification about updating metrics

    :param context: the request context
    :param host: the host emitting the notification
    :param host_ip: the IP address of the host
    :param nodename: the node name
    :param monitor_metric_list: the MonitorMetricList object
    """
    payload = metrics_notification.MetricsPayload(
        host=host,
        host_ip=host_ip,
        nodename=nodename,
        monitor_metric_list=monitor_metric_list)
    notification = metrics_notification.MetricsNotification(
        context=context,
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='metrics',
            action=fields.NotificationAction.UPDATE),
        payload=payload)
    notification.emit(context)


@rpc.if_notifications_enabled
def notify_about_libvirt_connect_error(context, ip, exception):
    """Send a versioned notification about libvirt connect error.

    :param context: the request context
    :param ip: the IP address of the host
    :param exception: the thrown exception
    """
    fault, _ = _get_fault_and_priority_from_exception(exception)
    payload = libvirt_notification.LibvirtErrorPayload(ip=ip, reason=fault)
    notification = libvirt_notification.LibvirtErrorNotification(
        priority=fields.NotificationPriority.ERROR,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='libvirt',
            action=fields.NotificationAction.CONNECT,
            phase=fields.NotificationPhase.ERROR),
        payload=payload)
    notification.emit(context)


@rpc.if_notifications_enabled
def notify_about_volume_usage(context, vol_usage, host):
    """Send versioned notification about the volume usage

    :param context: the request context
    :param vol_usage: the volume usage object
    :param host: the host emitting the notification
    """
    payload = volume_notification.VolumeUsagePayload(
        vol_usage=vol_usage)
    notification = volume_notification.VolumeUsageNotification(
        context=context,
        priority=fields.NotificationPriority.INFO,
        publisher=notification_base.NotificationPublisher(
            host=host, source=fields.NotificationSource.COMPUTE),
        event_type=notification_base.EventType(
            object='volume',
            action=fields.NotificationAction.USAGE),
        payload=payload)
    notification.emit(context)


@rpc.if_notifications_enabled
def notify_about_compute_task_error(context, action, instance_uuid,
                                    request_spec, state, exception):
    """Send a versioned notification about compute task error.

    :param context: the request context
    :param action: the name of the action
    :param instance_uuid: the UUID of the instance
    :param request_spec: the request spec object or
        the dict includes request spec information
    :param state: the vm state of the instance
    :param exception: the thrown exception
    """
    if (request_spec is not None and
            not isinstance(request_spec, objects.RequestSpec)):
        request_spec = objects.RequestSpec.from_primitives(
            context, request_spec, {})

    fault, _ = _get_fault_and_priority_from_exception(exception)
    payload = task_notification.ComputeTaskPayload(
        instance_uuid=instance_uuid, request_spec=request_spec, state=state,
        reason=fault)
    notification = task_notification.ComputeTaskNotification(
        priority=fields.NotificationPriority.ERROR,
        publisher=notification_base.NotificationPublisher(
            host=CONF.host, source=fields.NotificationSource.CONDUCTOR),
        event_type=notification_base.EventType(
            object='compute_task',
            action=action,
            phase=fields.NotificationPhase.ERROR),
        payload=payload)
    notification.emit(context)


def refresh_info_cache_for_instance(context, instance):
    """Refresh the info cache for an instance.

    :param instance: The instance object.
    """
    if instance.info_cache is not None and not instance.deleted:
        # Catch the exception in case the instance got deleted after the check
        # instance.deleted was executed
        try:
            instance.info_cache.refresh()
        except exception.InstanceInfoCacheNotFound:
            LOG.debug("Can not refresh info_cache because instance "
                      "was not found", instance=instance)


def get_reboot_type(task_state, current_power_state):
    """Checks if the current instance state requires a HARD reboot."""
    if current_power_state != power_state.RUNNING:
        return 'HARD'
    if task_state in task_states.soft_reboot_states:
        return 'SOFT'
    return 'HARD'
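
# The decision table, as a sketch (REBOOTING is one of the
# task_states.soft_reboot_states):
#
#   >>> get_reboot_type(task_states.REBOOTING, power_state.RUNNING)
#   'SOFT'
#   >>> get_reboot_type(task_states.REBOOTING, power_state.SHUTDOWN)
#   'HARD'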


def get_machine_ips():
    """Get the machine's ip addresses

    :returns: list of Strings of ip addresses
    """
    addresses = []
    for interface, ifaddresses in psutil.net_if_addrs().items():
        for ifaddress in ifaddresses:
            if ifaddress.family not in (socket.AF_INET, socket.AF_INET6):
                continue

            addr = ifaddress.address

            # If we have an ipv6 address remove the
            # %ether_interface at the end
            if ifaddress.family == socket.AF_INET6:
                addr = addr.split('%')[0]

            addresses.append(addr)

    return addresses


def upsize_quota_delta(new_flavor, old_flavor):
    """Calculate deltas required to adjust quota for an instance upsize.

    :param new_flavor: the target instance type
    :param old_flavor: the original instance type
    """
    def _quota_delta(resource):
        return (new_flavor[resource] - old_flavor[resource])

    deltas = {}
    if _quota_delta('vcpus') > 0:
        deltas['cores'] = _quota_delta('vcpus')
    if _quota_delta('memory_mb') > 0:
        deltas['ram'] = _quota_delta('memory_mb')

    return deltas
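
# A worked example (flavors are accessed as mappings here, so plain dicts
# illustrate the arithmetic; only positive deltas are counted):
#
#   >>> upsize_quota_delta({'vcpus': 4, 'memory_mb': 2048},
#   ...                    {'vcpus': 2, 'memory_mb': 2048})
#   {'cores': 2}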


def get_headroom(quotas, usages, deltas):
    headroom = {res: quotas[res] - usages[res]
                for res in quotas.keys()}
    # If quota_cores is unlimited [-1]:
    # - set cores headroom based on instances headroom:
    if quotas.get('cores') == -1:
        if deltas.get('cores'):
            hc = headroom.get('instances', 1) * deltas['cores']
            headroom['cores'] = hc / deltas.get('instances', 1)
        else:
            headroom['cores'] = headroom.get('instances', 1)

    # If quota_ram is unlimited [-1]:
    # - set ram headroom based on instances headroom:
    if quotas.get('ram') == -1:
        if deltas.get('ram'):
            hr = headroom.get('instances', 1) * deltas['ram']
            headroom['ram'] = hr / deltas.get('instances', 1)
        else:
            headroom['ram'] = headroom.get('instances', 1)

    return headroom
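
# A worked example: with unlimited cores (-1), the cores headroom is scaled
# from the instances headroom using the requested deltas:
#
#   >>> get_headroom(quotas={'instances': 10, 'cores': -1},
#   ...              usages={'instances': 4, 'cores': 8},
#   ...              deltas={'instances': 1, 'cores': 2})
#   {'instances': 6, 'cores': 12.0}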


def check_num_instances_quota(
    context, flavor, min_count, max_count, project_id=None, user_id=None,
    orig_num_req=None,
):
    """Enforce quota limits on number of instances created."""
    # project_id is also used for the TooManyInstances error message
    if project_id is None:
        project_id = context.project_id
    if user_id is None:
        user_id = context.user_id
    # Check whether we need to count resources per-user and check a per-user
    # quota limit. If we have no per-user quota limit defined for a
    # project/user, we can avoid wasteful resource counting.
    user_quotas = objects.Quotas.get_all_by_project_and_user(
        context, project_id, user_id)
    if not any(r in user_quotas for r in ['instances', 'cores', 'ram']):
        user_id = None
    # Determine requested cores and ram
    req_cores = max_count * flavor.vcpus
    req_ram = max_count * flavor.memory_mb
    deltas = {'instances': max_count, 'cores': req_cores, 'ram': req_ram}

    try:
        # NOTE(johngarbutt) when using unified limits, this call is a no-op,
        # and as such, this function always returns max_count
        objects.Quotas.check_deltas(context, deltas,
                                    project_id, user_id=user_id,
                                    check_project_id=project_id,
                                    check_user_id=user_id)
    except exception.OverQuota as exc:
        quotas = exc.kwargs['quotas']
        overs = exc.kwargs['overs']
        usages = exc.kwargs['usages']
        # This is for the recheck quota case where we used a delta of zero.
        if min_count == max_count == 0:
            # orig_num_req is the original number of instances requested in
            # the case of a recheck quota, for use in the over quota
            # exception.
            req_cores = orig_num_req * flavor.vcpus
            req_ram = orig_num_req * flavor.memory_mb
            requested = {'instances': orig_num_req, 'cores': req_cores,
                         'ram': req_ram}
            (overs, reqs, total_alloweds, useds) = get_over_quota_detail(
                deltas, overs, quotas, requested)
            msg = "Cannot run any more instances of this type."
            params = {'overs': overs, 'pid': project_id, 'msg': msg}
            LOG.debug("%(overs)s quota exceeded for %(pid)s. %(msg)s",
                      params)
            raise exception.TooManyInstances(overs=overs,
                                             req=reqs,
                                             used=useds,
                                             allowed=total_alloweds)
        # OK, we exceeded quota; let's figure out why...
        headroom = get_headroom(quotas, usages, deltas)

        allowed = headroom.get('instances', 1)
        # Reduce 'allowed' instances in line with the cores & ram headroom
        if flavor.vcpus:
            allowed = min(allowed, headroom['cores'] // flavor.vcpus)
        if flavor.memory_mb:
            allowed = min(allowed, headroom['ram'] // flavor.memory_mb)

        # Convert to the appropriate exception message
        if allowed <= 0:
            msg = "Cannot run any more instances of this type."
        elif min_count <= allowed <= max_count:
            # We're actually OK, but still need to check against allowed
            return check_num_instances_quota(
                context, flavor, min_count, allowed, project_id=project_id,
                user_id=user_id)
        else:
            msg = "Can only run %s more instances of this type." % allowed

        num_instances = (str(min_count) if min_count == max_count else
                         "%s-%s" % (min_count, max_count))
        requested = dict(instances=num_instances, cores=req_cores,
                         ram=req_ram)
        (overs, reqs, total_alloweds, useds) = get_over_quota_detail(
            headroom, overs, quotas, requested)
        params = {'overs': overs, 'pid': project_id,
                  'min_count': min_count, 'max_count': max_count,
                  'msg': msg}

        if min_count == max_count:
            LOG.debug("%(overs)s quota exceeded for %(pid)s,"
                      " tried to run %(min_count)d instances. "
                      "%(msg)s", params)
        else:
            LOG.debug("%(overs)s quota exceeded for %(pid)s,"
                      " tried to run between %(min_count)d and"
                      " %(max_count)d instances. %(msg)s",
                      params)
        raise exception.TooManyInstances(overs=overs,
                                         req=reqs,
                                         used=useds,
                                         allowed=total_alloweds)

    return max_count


def get_over_quota_detail(headroom, overs, quotas, requested):
    reqs = []
    useds = []
    total_alloweds = []
    for resource in overs:
        reqs.append(str(requested[resource]))
        useds.append(str(quotas[resource] - headroom[resource]))
        total_alloweds.append(str(quotas[resource]))
    (overs, reqs, useds, total_alloweds) = map(', '.join, (
        overs, reqs, useds, total_alloweds))
    return overs, reqs, total_alloweds, useds
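
# A worked example for a single over-limit resource (used is computed as
# quota minus headroom):
#
#   >>> get_over_quota_detail(headroom={'cores': 2}, overs=['cores'],
#   ...                       quotas={'cores': 20}, requested={'cores': 24})
#   ('cores', '24', '20', '18')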


def remove_shelved_keys_from_system_metadata(instance):
    # Delete system_metadata for a shelved instance
    for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
        if key in instance.system_metadata:
            del (instance.system_metadata[key])


def create_image(context, instance, name, image_type, image_api,
                 extra_properties=None):
    """Create new image entry in the image service. This new image
    will be reserved for the compute manager to upload a snapshot
    or backup.

    :param context: security context
    :param instance: nova.objects.instance.Instance object
    :param name: string for name of the snapshot
    :param image_type: snapshot | backup
    :param image_api: instance of nova.image.glance.API
    :param extra_properties: dict of extra image properties to include
    """
    properties = {
        'instance_uuid': instance.uuid,
        'user_id': str(context.user_id),
        'image_type': image_type,
    }
    properties.update(extra_properties or {})

    image_meta = initialize_instance_snapshot_metadata(
        context, instance, name, properties)
    # if we're making a snapshot, omit the disk and container formats,
    # since the image may have been converted to another format, and the
    # original values won't be accurate. The driver will populate these
    # with the correct values later, on image upload.
    if image_type == 'snapshot':
        image_meta.pop('disk_format', None)
        image_meta.pop('container_format', None)
    return image_api.create(context, image_meta)


def initialize_instance_snapshot_metadata(context, instance, name,
                                          extra_properties=None):
    """Initialize new metadata for a snapshot of the given instance.

    :param context: authenticated RequestContext; note that this may not
        be the owner of the instance itself, e.g. an admin creates a
        snapshot image of some user instance
    :param instance: nova.objects.instance.Instance object
    :param name: string for name of the snapshot
    :param extra_properties: dict of extra metadata properties to include

    :returns: the new instance snapshot metadata
    """
    image_meta = utils.get_image_from_system_metadata(
        instance.system_metadata)
    image_meta['name'] = name

    # If the user creating the snapshot is not in the same project as
    # the owner of the instance, then the image visibility should be
    # "shared" so the owner of the instance has access to the image, like
    # in the case of an admin creating a snapshot of another user's
    # server, either directly via the createImage API or via shelve.
    extra_properties = extra_properties or {}
    if context.project_id != instance.project_id:
        # The glance API client-side code will use this to add the
        # instance project as a member of the image for access.
        image_meta['visibility'] = 'shared'
        extra_properties['instance_owner'] = instance.project_id
        # TODO(mriedem): Should owner_project_name and owner_user_name
        # be removed from image_meta['properties'] here, or added to
        # [DEFAULT]/non_inheritable_image_properties? It is confusing
        # otherwise to see the owner project not match those values.
    else:
        # The request comes from the owner of the instance so make the
        # image private.
        image_meta['visibility'] = 'private'

    # Delete properties that are non-inheritable
    properties = image_meta['properties']
    keys_to_pop = set(CONF.non_inheritable_image_properties).union(
        NON_INHERITABLE_IMAGE_PROPERTIES)
    for ns in NON_INHERITABLE_IMAGE_NAMESPACES:
        keys_to_pop |= {key for key in properties
                        if key.startswith(ns)}
    for key in keys_to_pop:
        properties.pop(key, None)

    # The properties in extra_properties have precedence
    properties.update(extra_properties)

    return image_meta


def delete_image(context, instance, image_api, image_id, log_exc_info=False):
    """Deletes the image if it still exists.

    Ignores ImageNotFound if the image is already gone.

    :param context: the nova auth request context where the context.project_id
        matches the owner of the image
    :param instance: the instance for which the snapshot image was created
    :param image_api: the image API used to delete the image
    :param image_id: the ID of the image to delete
    :param log_exc_info: True if this is being called from an exception handler
        block and traceback should be logged at DEBUG level, False otherwise.
    """
    LOG.debug("Cleaning up image %s", image_id, instance=instance,
              log_exc_info=log_exc_info)
    try:
        image_api.delete(context, image_id)
    except exception.ImageNotFound:
        # Since we're trying to clean up an image, we don't care
        # if it's already gone.
        pass
    except Exception:
        LOG.exception("Error while trying to clean up image %s",
                      image_id, instance=instance)


def may_have_ports_or_volumes(instance):
    """Checks to see if an instance may have ports or volumes based on
    vm_state

    This is primarily only useful when instance.host is None.

    :param instance: The nova.objects.Instance in question.
    :returns: True if the instance may have ports or volumes, False otherwise
    """
    # NOTE(melwitt): When an instance build fails in the compute manager,
    # the instance host and node are set to None and the vm_state is set
    # to ERROR. In that case, the instance with host = None has actually
    # been scheduled and may have ports and/or volumes allocated on the
    # compute node.
    if instance.vm_state in (vm_states.SHELVED_OFFLOADED, vm_states.ERROR):
        return True
    return False


def get_stashed_volume_connector(bdm, instance):
    """Lookup a connector dict from the bdm.connection_info if set

    Gets the stashed connector dict out of the bdm.connection_info if set
    and the connector host matches the instance host.

    :param bdm: nova.objects.block_device.BlockDeviceMapping
    :param instance: nova.objects.instance.Instance
    :returns: volume connector dict or None
    """
    if 'connection_info' in bdm and bdm.connection_info is not None:
        # NOTE(mriedem): We didn't start stashing the connector in the
        # bdm.connection_info until Mitaka so it might not be there on old
        # attachments. Also, if the volume was attached when the instance
        # was in shelved_offloaded state and it hasn't been unshelved yet
        # we don't have the attachment/connection information either.
        connector = jsonutils.loads(bdm.connection_info).get('connector')
        if connector:
            if connector.get('host') == instance.host:
                return connector
            LOG.debug('Found stashed volume connector for instance but '
                      'connector host %(connector_host)s does not match '
                      'the instance host %(instance_host)s.',
                      {'connector_host': connector.get('host'),
                       'instance_host': instance.host}, instance=instance)
            if (instance.host is None and
                    may_have_ports_or_volumes(instance)):
                LOG.debug('Allowing use of stashed volume connector with '
                          'instance host None because instance with '
                          'vm_state %(vm_state)s has been scheduled in '
                          'the past.', {'vm_state': instance.vm_state},
                          instance=instance)
                return connector


class EventReporter(object):
    """Context manager to report instance action events.

    If constructed with ``graceful_exit=True`` the __exit__ function will
    handle and not re-raise on InstanceActionNotFound.
    """

    def __init__(self, context, event_name, host, *instance_uuids,
                 graceful_exit=False):
        self.context = context
        self.event_name = event_name
        self.instance_uuids = instance_uuids
        self.host = host
        self.graceful_exit = graceful_exit

    def __enter__(self):
        for uuid in self.instance_uuids:
            objects.InstanceActionEvent.event_start(
                self.context, uuid, self.event_name, want_result=False,
                host=self.host)

        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        for uuid in self.instance_uuids:
            try:
                objects.InstanceActionEvent.event_finish_with_failure(
                    self.context, uuid, self.event_name, exc_val=exc_val,
                    exc_tb=exc_tb, want_result=False)
            except exception.InstanceActionNotFound:
                # If the instance action was not found then determine if we
                # should re-raise based on the graceful_exit attribute.
                with excutils.save_and_reraise_exception(
                        reraise=not self.graceful_exit):
                    if self.graceful_exit:
                        return True
        return False
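
# A hedged usage sketch (hypothetical context and instance; the start event
# is recorded on entry and finished, with any exception captured as a
# failure, on exit):
#
#   with EventReporter(ctxt, 'compute_reboot_instance', self.host,
#                      instance.uuid):
#       do_reboot()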


def wrap_instance_event(prefix, graceful_exit=False):
    """Wraps a method to log the event taken on the instance, and result.

    This decorator wraps a method to log the start and result of an event, as
    part of an action taken on an instance.

    :param prefix: prefix for the event name, usually a service binary like
        "compute" or "conductor" to indicate the origin of the event.
    :param graceful_exit: True if the decorator should gracefully handle
        InstanceActionNotFound errors, False otherwise. This should rarely be
        True.
    """
    @utils.expects_func_args('instance')
    def helper(function):

        @functools.wraps(function)
        def decorated_function(self, context, *args, **kwargs):
            wrapped_func = safe_utils.get_wrapped_function(function)
            keyed_args = inspect.getcallargs(wrapped_func, self, context,
                                             *args, **kwargs)
            instance_uuid = keyed_args['instance']['uuid']

            event_name = '{0}_{1}'.format(prefix, function.__name__)
            host = self.host if hasattr(self, 'host') else None
            with EventReporter(context, event_name, host, instance_uuid,
                               graceful_exit=graceful_exit):
                return function(self, context, *args, **kwargs)
        return decorated_function
    return helper
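
# A hedged usage sketch (as applied to a hypothetical compute manager
# method; the wrapped function must accept an 'instance' argument). This
# records a 'compute_reboot_instance' action event around the call:
#
#   @wrap_instance_event(prefix='compute')
#   def reboot_instance(self, context, instance):
#       ...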


class UnlimitedSemaphore(object):
    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    @property
    def balance(self):
        return 0


# This semaphore is used to enforce a limit on disk-IO-intensive operations
# (image downloads, image conversions) at any given time.
# It is initialized at ComputeManager.init_host()
disk_ops_semaphore = UnlimitedSemaphore()


@contextlib.contextmanager
def notify_about_instance_delete(notifier, context, instance,
                                 delete_type='delete',
                                 source=fields.NotificationSource.API):
    try:
        notify_about_instance_usage(notifier, context, instance,
                                    "%s.start" % delete_type)
        # Note(gibi): force_delete types will be handled in a
        # subsequent patch
        if delete_type in ['delete', 'soft_delete']:
            notify_about_instance_action(
                context,
                instance,
                host=CONF.host,
                source=source,
                action=delete_type,
                phase=fields.NotificationPhase.START)

        yield
    finally:
        notify_about_instance_usage(notifier, context, instance,
                                    "%s.end" % delete_type)
        if delete_type in ['delete', 'soft_delete']:
            notify_about_instance_action(
                context,
                instance,
                host=CONF.host,
                source=source,
                action=delete_type,
                phase=fields.NotificationPhase.END)
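
# A hedged usage sketch (hypothetical notifier/context; 'delete.start' is
# emitted before the body runs and 'delete.end' afterwards, even if the
# body raises):
#
#   with notify_about_instance_delete(notifier, ctxt, instance):
#       instance.destroy()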


def update_pci_request_with_placement_allocations(
        context, report_client, pci_requests, provider_mapping):
    """Update the instance's PCI request based on the request group -
    resource provider mapping and the device RP name from placement.

    :param context: the request context
    :param report_client: a SchedulerReportClient instance
    :param pci_requests: A list of InstancePCIRequest objects to be updated
    :param provider_mapping: the request group - resource provider mapping
        in the form returned by the RequestSpec.get_request_group_mapping()
        call.
    :raises AmbiguousResourceProviderForPCIRequest: if more than one
        resource provider provides resource for the given PCI request.
    :raises UnexpectedResourceProviderNameForPCIRequest: if the resource
        provider, which provides resource for the pci request, does not
        have a well formatted name so we cannot parse the parent interface
        name out of it.
    """
    if not pci_requests:
        return

    def needs_update_due_to_qos(pci_request, mapping):
        return (pci_request.requester_id and
                pci_request.requester_id in mapping)

    def get_group_mapping_for_flavor_based_pci_request(pci_request, mapping):
        # NOTE(gibi): for flavor based PCI requests nova generates
        # RequestGroup suffixes from InstancePCIRequests in the form of
        # {request_id}-{count_index}
        # NOTE(gibi): a suffixed request group is always fulfilled from a
        # single RP
        return {
            group_id: rp_uuids[0]
            for group_id, rp_uuids in mapping.items()
            if group_id.startswith(pci_request.request_id)
        }

    for pci_request in pci_requests:
        mapping = get_group_mapping_for_flavor_based_pci_request(
            pci_request, provider_mapping)

        if mapping:
            for spec in pci_request.spec:
                # FIXME(gibi): this is baaad but spec is a dict of strings so
                # we need to serialize
                spec['rp_uuids'] = ','.join(mapping.values())

        elif needs_update_due_to_qos(pci_request, provider_mapping):

            provider_uuids = provider_mapping[pci_request.requester_id]
            if len(provider_uuids) != 1:
                raise exception.AmbiguousResourceProviderForPCIRequest(
                    providers=provider_uuids,
                    requester=pci_request.requester_id)

            dev_rp_name = report_client.get_resource_provider_name(
                context,
                provider_uuids[0])

            # NOTE(gibi): the device RP name reported by neutron is
            # structured like <hostname>:<agentname>:<interfacename>
            rp_name_pieces = dev_rp_name.split(':')
            if len(rp_name_pieces) != 3:
                ex = exception.UnexpectedResourceProviderNameForPCIRequest
                raise ex(
                    provider=provider_uuids[0],
                    requester=pci_request.requester_id,
                    provider_name=dev_rp_name)

            for spec in pci_request.spec:
                spec['parent_ifname'] = rp_name_pieces[2]


def delete_arqs_if_needed(context, instance, arq_uuids=None):
    """Delete Cyborg ARQs for the instance.

    :param context: the request context
    :param instance: the instance that owns the ARQs
    :param arq_uuids: UUIDs of ARQs to delete that are not yet bound to the
        instance.
    """
    cyclient = cyborg.get_client(context)
    dp_name = instance.flavor.extra_specs.get('accel:device_profile')

    if dp_name:
        LOG.debug('Calling Cyborg to delete ARQs for instance %(instance)s',
                  {'instance': instance.uuid})
        try:
            cyclient.delete_arqs_for_instance(instance.uuid)
        except exception.AcceleratorRequestOpFailed as e:
            LOG.exception('Failed to delete accelerator requests for '
                          'instance %s. Exception: %s', instance.uuid, e)

    if arq_uuids:
        LOG.debug('Calling Cyborg to delete ARQs by uuids %(uuid)s for'
                  ' instance %(instance)s',
                  {'instance': instance.uuid,
                   'uuid': arq_uuids})
        cyclient.delete_arqs_by_uuid(arq_uuids)