Coverage for nova/utils.py: 92%
537 statements
« prev ^ index » next coverage.py v7.6.12, created at 2025-04-17 15:08 +0000
1# Copyright 2010 United States Government as represented by the
2# Administrator of the National Aeronautics and Space Administration.
3# Copyright 2011 Justin Santa Barbara
4# All Rights Reserved.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
18"""Utilities and helper functions."""
20import contextlib
21import datetime
22import functools
23import hashlib
24import inspect
25import os
26import random
27import re
28import shutil
29import tempfile
31import eventlet
32from eventlet import tpool
33from keystoneauth1 import loading as ks_loading
34import netaddr
35from openstack import connection
36from openstack import exceptions as sdk_exc
37import os_resource_classes as orc
38from os_service_types import service_types
39from oslo_concurrency import lockutils
40from oslo_concurrency import processutils
41from oslo_context import context as common_context
42from oslo_log import log as logging
43import oslo_messaging as messaging
44from oslo_utils import encodeutils
45from oslo_utils import excutils
46from oslo_utils import importutils
47from oslo_utils import strutils
48from oslo_utils import timeutils
50import nova.conf
51from nova import exception
52from nova.i18n import _
53from nova import safe_utils
# osprofiler is an optional dependency; ``profiler`` is None when it is
# not installed, and callers must check for that before use.
profiler = importutils.try_import('osprofiler.profiler')

CONF = nova.conf.CONF

LOG = logging.getLogger(__name__)

# Decorator factory producing locks with the 'nova-' prefix.
synchronized = lockutils.synchronized_with_prefix('nova-')

# Prefix applied to image property keys when stored in instance
# system_metadata (see get_system_metadata_from_image below).
SM_IMAGE_PROP_PREFIX = "image_"
# Top-level image attributes (not properties) that are copied into
# system_metadata and back out again.
SM_INHERITABLE_KEYS = (
    'min_ram', 'min_disk', 'disk_format', 'container_format',
)
# Keys which hold large structured data that won't fit in the
# size constraints of the system_metadata table, so we avoid
# storing and/or loading them.
SM_SKIP_KEYS = (
    # Legacy names
    'mappings', 'block_device_mapping',
    # Modern names
    'img_mappings', 'img_block_device_mapping',
)

# filename -> {'data': ..., 'mtime': ...} cache used by read_cached_file().
_FILE_CACHE = {}

_SERVICE_TYPES = service_types.ServiceTypes()

# Lazily-created shared GreenPool; see _get_default_green_pool().
DEFAULT_GREEN_POOL = None
def _get_default_green_pool():
    """Lazily create and return the process-wide eventlet GreenPool.

    The pool is sized from CONF.default_green_pool_size on first use and
    cached in the module-level DEFAULT_GREEN_POOL thereafter.
    """
    global DEFAULT_GREEN_POOL
    pool = DEFAULT_GREEN_POOL
    if pool is None:
        pool = eventlet.greenpool.GreenPool(CONF.default_green_pool_size)
        DEFAULT_GREEN_POOL = pool
    return pool
# NOTE(mikal): this seems to have to stay for now to handle os-brick
# requirements. This makes me a sad panda.
def get_root_helper():
    """Return the privilege-escalation command prefix for external tools."""
    if CONF.workarounds.disable_rootwrap:
        return 'sudo'
    return 'sudo nova-rootwrap %s' % CONF.rootwrap_config
def ssh_execute(dest, *cmd, **kwargs):
    """Convenience wrapper to execute ssh command."""
    # BatchMode=yes prevents ssh from prompting for a password.
    full_cmd = ['ssh', '-o', 'BatchMode=yes', dest, *cmd]
    return processutils.execute(*full_cmd, **kwargs)
def generate_uid(topic, size=8):
    """Return '<topic>-<random suffix>' with a suffix of ``size`` chars."""
    suffix = generate_random_string(size)
    return '%s-%s' % (topic, suffix)
def generate_random_string(size=8):
    """Return a random string of lowercase alphanumeric characters.

    :param size: length of the generated string
    :returns: string of ``size`` characters drawn from [0-9a-z]
    """
    # NOTE: the previous alphabet ('01234567890...') listed '0' twice,
    # slightly biasing output towards '0'; each symbol now appears once.
    characters = '0123456789abcdefghijklmnopqrstuvwxyz'
    return ''.join(random.choice(characters) for _ in range(size))
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
# generate_password() guarantees at least one character from each group.
DEFAULT_PASSWORD_SYMBOLS = ('23456789',  # Removed: 0,1
                            'ABCDEFGHJKLMNPQRSTUVWXYZ',  # Removed: I, O
                            'abcdefghijkmnopqrstuvwxyz')  # Removed: l
def last_completed_audit_period(unit=None, before=None):
    """This method gives you the most recently *completed* audit period.

    arguments:
            units: string, one of 'hour', 'day', 'month', 'year'
                Periods normally begin at the beginning (UTC) of the
                period unit (So a 'day' period begins at midnight UTC,
                a 'month' unit on the 1st, a 'year' on Jan, 1)
                unit string may be appended with an optional offset
                like so:  'day@18'  This will begin the period at 18:00
                UTC.  'month@15' starts a monthly period on the 15th,
                and year@3 begins a yearly one on March 1st.
            before: Give the audit period most recently completed before
                <timestamp>. Defaults to now.

    returns:  2 tuple of datetimes (begin, end)
              The begin timestamp of this audit period is the same as the
              end of the previous.
    """
    if not unit:
        unit = CONF.instance_usage_audit_period

    # Parse an optional '@<offset>' suffix, e.g. 'day@18' or 'month@15'.
    offset = 0
    if '@' in unit:
        unit, offset = unit.split("@", 1)
        offset = int(offset)

    if before is not None:
        rightnow = before
    else:
        rightnow = timeutils.utcnow()
    if unit == 'month':
        # A month offset of 0 means "the 1st of the month".
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=offset,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            # This month's period hasn't completed yet; step back one
            # month (wrapping into the previous year from January).
            year = rightnow.year
            if 1 >= rightnow.month:
                year -= 1
                month = 12 + (rightnow.month - 1)
            else:
                month = rightnow.month - 1
            end = datetime.datetime(day=offset,
                                    month=month,
                                    year=year)
        # begin is one month before end, again wrapping across January.
        year = end.year
        if 1 >= end.month:
            year -= 1
            month = 12 + (end.month - 1)
        else:
            month = end.month - 1
        begin = datetime.datetime(day=offset, month=month, year=year)

    elif unit == 'year':
        # A year offset of 0 means "January".
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=1, month=offset, year=rightnow.year)
        if end >= rightnow:
            # This year's period hasn't completed yet; use the previous
            # year's period instead.
            end = datetime.datetime(day=1,
                                    month=offset,
                                    year=rightnow.year - 1)
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 2)
        else:
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 1)

    elif unit == 'day':
        # Offset is the starting hour of the daily period.
        end = datetime.datetime(hour=offset,
                                day=rightnow.day,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            end = end - datetime.timedelta(days=1)
        begin = end - datetime.timedelta(days=1)

    else:  # unit == 'hour'
        # Offset is the starting minute of the hourly period.
        end = rightnow.replace(minute=offset, second=0, microsecond=0)
        if end >= rightnow:
            end = end - datetime.timedelta(hours=1)
        begin = end - datetime.timedelta(hours=1)

    return (begin, end)
def generate_password(length=None, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
    """Generate a random password from the supplied symbol groups.

    At least one symbol from each group will be included. Unpredictable
    results if length is less than the number of symbol groups.

    Believed to be reasonably secure (with a reasonable password length!)
    """
    if length is None:
        length = CONF.password_length

    rng = random.SystemRandom()

    # Seed the password with one character from every group so policies
    # requiring each character class are satisfied.
    password = [rng.choice(group) for group in symbolgroups]
    # If length < len(symbolgroups), truncation would otherwise always keep
    # characters from the first groups; shuffle first so what survives the
    # cut is unpredictable.
    rng.shuffle(password)
    password = password[:length]
    remaining = length - len(password)

    # Fill the remainder from the union of all symbol groups.
    all_symbols = ''.join(symbolgroups)
    password.extend(rng.choice(all_symbols) for _ in range(remaining))

    # Final shuffle so the leading characters aren't from predictable groups.
    rng.shuffle(password)

    return ''.join(password)
# TODO(sfinucan): Replace this with the equivalent from oslo.utils
def utf8(value):
    """Try to turn a string into utf-8 if possible.

    None and bytes pass through unchanged; anything else is stringified
    and encoded. Originally adapted from the utf8 function in
    http://github.com/facebook/tornado/blob/master/tornado/escape.py
    """
    if value is None:
        return value
    if isinstance(value, bytes):
        return value
    return str(value).encode('utf-8')
def parse_server_string(server_str):
    """Parses the given server_string and returns a tuple of host and port.
    If it's not a combination of host part and port, the port element
    is an empty string. If the input is invalid expression, return a tuple of
    two empty strings.
    """
    try:
        # A bare IPv6 literal contains colons but carries no port.
        if netaddr.valid_ipv6(server_str):
            return (server_str, '')

        # '[addr]:port' form: bracketed IPv6 address plus port.
        if ']:' in server_str:
            address, port = server_str.replace('[', '', 1).split(']:')
            return (address, port)

        # No colon at all: a plain host without a port.
        if ':' not in server_str:
            return (server_str, '')

        # Must be 'host:port'; more than one colon raises ValueError.
        address, port = server_str.split(':')
        return (address, port)
    except (ValueError, netaddr.AddrFormatError):
        LOG.error('Invalid server_string: %s', server_str)
        return ('', '')
def get_shortened_ipv6(address):
    """Return the canonical shortened form of an IPv6 address."""
    return str(netaddr.IPAddress(address, version=6).ipv6())
def get_shortened_ipv6_cidr(address):
    """Return the canonical CIDR form of an IPv6 network."""
    return str(netaddr.IPNetwork(address, version=6).cidr)
def safe_ip_format(ip):
    """Transform ip string to "safe" format.

    Will return ipv4 addresses unchanged, but will nest ipv6 addresses
    inside square brackets.
    """
    try:
        version = netaddr.IPAddress(ip).version
    except (TypeError, netaddr.AddrFormatError):  # hostname
        return ip
    # Bracket IPv6 literals; IPv4 passes through untouched.
    return '[%s]' % ip if version == 6 else ip
def format_remote_path(host, path):
    """Returns remote path in format acceptable for scp/rsync.

    If host is IPv6 address literal, return '[host]:path', otherwise
    'host:path' is returned.

    If host is None, only path is returned.
    """
    if host is None:
        return path
    return "%s:%s" % (safe_ip_format(host), path)
def make_dev_path(dev, partition=None, base='/dev'):
    """Return a path to a particular device.

    >>> make_dev_path('xvdc')
    /dev/xvdc

    >>> make_dev_path('xvdc', 1)
    /dev/xvdc1
    """
    suffix = str(partition) if partition else ''
    return os.path.join(base, dev) + suffix
def sanitize_hostname(hostname, default_name=None):
    """Sanitize a given hostname.

    Return a hostname which conforms to RFC-952 and RFC-1123 specs except the
    length of hostname. Window, Linux, and dnsmasq has different limitation:

    - Windows: 255 (net_bios limits to 15, but window will truncate it)
    - Linux: 64
    - dnsmasq: 63

    We choose the lowest of these (so 63).
    """

    def _clip(name):
        # Warn (but still truncate) when the name exceeds 63 characters.
        if len(name) > 63:
            LOG.warning("Hostname %(hostname)s is longer than 63, "
                        "truncate it to %(truncated_name)s",
                        {'hostname': name, 'truncated_name': name[:63]})
        return name[:63]

    if isinstance(hostname, str):
        # Remove characters outside the Unicode range U+0000-U+00FF
        hostname = hostname.encode('latin-1', 'ignore').decode('latin-1')

    sanitized = _clip(hostname)
    sanitized = re.sub(r'[ _\.]', '-', sanitized)
    sanitized = re.sub(r'[^\w.-]+', '', sanitized)
    sanitized = sanitized.lower().strip('.-')
    # NOTE(eliqiao): fall back to default_display_name rather than return
    # an empty hostname.
    if sanitized == "" and default_name is not None:
        return _clip(default_name)
    return sanitized
@contextlib.contextmanager
def temporary_mutation(obj, **kwargs):
    """Temporarily set the attr on a particular object to a given value then
    revert when finished.

    One use of this is to temporarily set the read_deleted flag on a context
    object:

        with temporary_mutation(context, read_deleted="yes"):
            do_something_that_needed_deleted_objects()
    """
    NOT_PRESENT = object()

    # Decide once whether obj behaves like a mapping or a plain object.
    dict_like = hasattr(obj, 'has_key') or isinstance(obj, dict)

    def _get(attr, default):
        if dict_like:
            return obj.get(attr, default)
        return getattr(obj, attr, default)

    def _set(attr, val):
        if dict_like:
            obj[attr] = val
        else:
            setattr(obj, attr, val)

    def _del(attr):
        if dict_like:
            del obj[attr]
        else:
            delattr(obj, attr)

    # Record prior values (or absence) before applying the mutations.
    saved = {attr: _get(attr, NOT_PRESENT) for attr in kwargs}
    for attr, new_value in kwargs.items():
        _set(attr, new_value)

    try:
        yield
    finally:
        # Restore every attribute, deleting ones that did not exist before.
        for attr, old_value in saved.items():
            if old_value is NOT_PRESENT:
                _del(attr)
            else:
                _set(attr, old_value)
def generate_mac_address():
    """Generate an Ethernet MAC address."""
    # NOTE(vish): We would prefer to use 0xfe here to ensure that linux
    #             bridge mac addresses don't change, but it appears to
    #             conflict with libvirt, so we use the next highest octet
    #             that has the unicast and locally administered bits set
    #             properly: 0xfa.
    #             Discussion: https://bugs.launchpad.net/nova/+bug/921838
    octets = [0xfa, 0x16, 0x3e]
    octets.extend(random.randint(0x00, 0xff) for _ in range(3))
    return ':'.join('%02x' % octet for octet in octets)
# NOTE(mikal): I really wanted this code to go away, but I can't find a way
# to implement what the callers of this method want with privsep. Basically,
# if we could hand off either a file descriptor or a file like object then
# we could make this go away.
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
    """Temporarily chown a path.

    :param owner_uid: UID of temporary owner (defaults to current user)
    """
    if owner_uid is None:
        owner_uid = os.getuid()

    original_uid = os.stat(path).st_uid
    # Only chown (and chown back) when ownership actually differs.
    needs_chown = original_uid != owner_uid
    if needs_chown:
        nova.privsep.path.chown(path, uid=owner_uid)
    try:
        yield
    finally:
        if needs_chown:
            nova.privsep.path.chown(path, uid=original_uid)
@contextlib.contextmanager
def tempdir(**kwargs):
    """Yield a freshly-created temporary directory, removing it on exit.

    Keyword arguments are forwarded to tempfile.mkdtemp; the parent
    directory defaults to CONF.tempdir when not supplied.
    """
    mkdtemp_args = dict(kwargs)
    if 'dir' not in mkdtemp_args:
        mkdtemp_args['dir'] = CONF.tempdir
    tmpdir = tempfile.mkdtemp(**mkdtemp_args)
    try:
        yield tmpdir
    finally:
        # Best-effort cleanup: log (don't raise) if removal fails.
        try:
            shutil.rmtree(tmpdir)
        except OSError as e:
            LOG.error('Could not remove tmpdir: %s', e)
class UndoManager(object):
    """Provides a mechanism to facilitate rolling back a series of actions
    when an exception is raised.
    """

    def __init__(self):
        # Undo callables, appended in the order their actions ran.
        self.undo_stack = []

    def undo_with(self, undo_func):
        """Register a callable to be invoked on rollback."""
        self.undo_stack.append(undo_func)

    def _rollback(self):
        # Undo in LIFO order so later actions are reverted first.
        for undo_func in reversed(self.undo_stack):
            undo_func()

    def rollback_and_reraise(self, msg=None, **kwargs):
        """Rollback a series of actions then re-raise the exception.

        .. note:: (sirp) This should only be called within an
                  exception handler.
        """
        with excutils.save_and_reraise_exception():
            if msg:
                LOG.exception(msg, **kwargs)

            self._rollback()
def metadata_to_dict(metadata, include_deleted=False):
    """Flatten a list of {'key': ..., 'value': ...} items into a dict.

    Items flagged 'deleted' are skipped unless include_deleted is True.
    """
    return {
        item['key']: item['value']
        for item in metadata
        if include_deleted or not item.get('deleted')
    }
def dict_to_metadata(metadata):
    """Expand a dict into a list of {'key': ..., 'value': ...} items."""
    return [{'key': key, 'value': value} for key, value in metadata.items()]
def instance_meta(instance):
    """Return instance metadata as a plain dict."""
    metadata = instance['metadata']
    if isinstance(metadata, dict):
        return metadata
    # Legacy list-of-items form; flatten it.
    return metadata_to_dict(metadata)
# TODO(stephenfin): Instance.system_metadata is always a dict now (thanks,
# o.vo) so this check (and the function as a whole) can be removed
def instance_sys_meta(instance):
    """Return instance system_metadata as a plain dict (possibly empty)."""
    sys_meta = instance.get('system_metadata')
    if not sys_meta:
        return {}
    if isinstance(sys_meta, dict):
        return sys_meta
    # Legacy list-of-items form; deleted entries are retained here.
    return metadata_to_dict(sys_meta, include_deleted=True)
def expects_func_args(*args):
    """Build a decorator-checker asserting decorated functions take ``args``."""
    def _decorator_checker(dec):
        @functools.wraps(dec)
        def _decorator(f):
            base_f = safe_utils.get_wrapped_function(f)
            argspec = inspect.getfullargspec(base_f)
            named, varargs, varkw = argspec[0], argspec[1], argspec[2]
            # NOTE (ndipanov): We can't really tell if correct stuff will
            # be passed if it's a function with *args or **kwargs so
            # we still carry on and hope for the best
            if varargs or varkw or set(args) <= set(named):
                return dec(f)
            raise TypeError("Decorated function %(f_name)s does not "
                            "have the arguments expected by the "
                            "decorator %(d_name)s" %
                            {'f_name': base_f.__name__,
                             'd_name': dec.__name__})
        return _decorator
    return _decorator_checker
class ExceptionHelper(object):
    """Class to wrap another and translate the ClientExceptions raised by its
    function calls to the actual ones.
    """

    def __init__(self, target):
        self._target = target

    def __getattr__(self, name):
        wrapped = getattr(self._target, name)

        @functools.wraps(wrapped)
        def _translate(*args, **kwargs):
            try:
                return wrapped(*args, **kwargs)
            except messaging.ExpectedException as e:
                # Re-raise the original exception carried inside the RPC
                # ExpectedException wrapper.
                raise e.exc_info[1]
        return _translate
def check_string_length(value, name=None, min_length=0, max_length=None):
    """Check the length of specified string

    :param value: the value of the string
    :param name: the name of the string
    :param min_length: the min_length of the string
    :param max_length: the max_length of the string
    :raises: InvalidInput when the string fails validation
    """
    try:
        strutils.check_string_length(
            value, name=name, min_length=min_length, max_length=max_length)
    except (ValueError, TypeError) as exc:
        # Normalise oslo validation errors to nova's InvalidInput.
        raise exception.InvalidInput(message=exc.args[0])
def validate_integer(value, name, min_value=None, max_value=None):
    """Make sure that value is a valid integer, potentially within range.

    :param value: value of the integer
    :param name: name of the integer
    :param min_value: min_value of the integer
    :param max_value: max_value of the integer
    :returns: integer
    :raise: InvalidInput If value is not a valid integer
    """
    try:
        return strutils.validate_integer(value, name, min_value, max_value)
    except ValueError as e:
        # Normalise oslo validation errors to nova's InvalidInput.
        raise exception.InvalidInput(reason=str(e))
def _serialize_profile_info():
    """Capture the active osprofiler trace for propagation, if any.

    Returns None when osprofiler is not installed or no trace is active.
    """
    if not profiler:
        return None
    prof = profiler.get()
    if not prof:
        return None
    # FIXME(DinaBelova): we'll add profiler.get_info() method
    # to extract this info -> we'll need to update these lines
    return {
        "hmac_key": prof.hmac_key,
        "base_id": prof.get_base_id(),
        "parent_id": prof.get_id(),
    }
def pass_context(runner, func, *args, **kwargs):
    """Generalised passthrough method

    It will grab the context from the threadlocal store and add it to
    the store on the runner. This allows for continuity in logging the
    context when using this method to spawn a new thread through the
    runner function
    """
    current_ctxt = common_context.get_current()
    profiler_info = _serialize_profile_info()

    @functools.wraps(func)
    def context_wrapper(*wargs, **wkwargs):
        # NOTE: If update_store is not called after spawn it won't be
        # available for the logger to pull from threadlocal storage.
        if current_ctxt is not None:
            current_ctxt.update_store()
        if profiler_info and profiler:
            profiler.init(**profiler_info)
        return func(*wargs, **wkwargs)

    return runner(context_wrapper, *args, **kwargs)
def spawn(func, *args, **kwargs):
    """Passthrough method for eventlet.spawn.

    This utility exists so that it can be stubbed for testing without
    interfering with the service spawns.

    It will also grab the context from the threadlocal store and add it to
    the store on the new thread.  This allows for continuity in logging the
    context when using this method to spawn a new thread.
    """
    pool = _get_default_green_pool()
    return pass_context(pool.spawn, func, *args, **kwargs)
def spawn_n(func, *args, **kwargs):
    """Passthrough method for eventlet.greenpool.spawn_n.

    This utility exists so that it can be stubbed for testing without
    interfering with the service spawns.

    It will also grab the context from the threadlocal store and add it to
    the store on the new thread.  This allows for continuity in logging the
    context when using this method to spawn a new thread.
    """
    pool = _get_default_green_pool()
    pass_context(pool.spawn_n, func, *args, **kwargs)
def tpool_execute(func, *args, **kwargs):
    """Run func in a native thread"""
    return pass_context(tpool.execute, func, *args, **kwargs)
def is_none_string(val):
    """Check if a string represents a None value.
    """
    return isinstance(val, str) and val.lower() == 'none'
def is_auto_disk_config_disabled(auto_disk_config_raw):
    """Return True when the raw auto_disk_config value reads 'disabled'."""
    if auto_disk_config_raw is None:
        return False
    return auto_disk_config_raw.strip().lower() == "disabled"
def get_auto_disk_config_from_instance(instance=None, sys_meta=None):
    """Fetch the image_auto_disk_config value from instance system metadata."""
    meta = sys_meta if sys_meta is not None else instance_sys_meta(instance)
    return meta.get("image_auto_disk_config")
def get_auto_disk_config_from_image_props(image_properties):
    """Fetch the auto_disk_config image property, or None if unset."""
    return image_properties.get("auto_disk_config")
def get_system_metadata_from_image(image_meta, flavor=None):
    """Build instance system_metadata entries from image metadata.

    Image properties are stored under the SM_IMAGE_PROP_PREFIX namespace
    (truncated to fit the table); selected top-level keys are inherited.
    """
    system_meta = {}
    prefix_format = SM_IMAGE_PROP_PREFIX + '%s'

    for key, value in image_meta.get('properties', {}).items():
        # Skip structured values too large for the system_metadata table.
        if key in SM_SKIP_KEYS:
            continue
        system_meta[prefix_format % key] = safe_truncate(str(value), 255)

    for key in SM_INHERITABLE_KEYS:
        value = image_meta.get(key)

        if key == 'min_disk' and flavor:
            if image_meta.get('disk_format') == 'vhd':
                # VHD images always use the flavor's root disk size.
                value = flavor['root_gb']
            else:
                value = max(value or 0, flavor['root_gb'])

        if value is None:
            continue

        system_meta[prefix_format % key] = value

    return system_meta
def get_image_from_system_metadata(system_meta):
    """Reconstruct image metadata from instance system_metadata."""
    image_meta = {}
    properties = {}

    if not isinstance(system_meta, dict):
        system_meta = metadata_to_dict(system_meta, include_deleted=True)

    for raw_key, value in system_meta.items():
        if value is None:
            continue

        # NOTE(xqueralt): Not sure this has to inherit all the properties or
        # just the ones we need. Leaving it for now to keep the old behaviour.
        key = raw_key
        if key.startswith(SM_IMAGE_PROP_PREFIX):
            key = key[len(SM_IMAGE_PROP_PREFIX):]

        if key in SM_SKIP_KEYS:
            continue

        # Inheritable keys go to the top level; everything else becomes
        # an image property.
        target = image_meta if key in SM_INHERITABLE_KEYS else properties
        target[key] = value

    image_meta['properties'] = properties

    return image_meta
def get_hash_str(base_str):
    """Returns string that represents MD5 hash of base_str (in hex format).

    If base_str is a Unicode string, encode it to UTF-8.
    """
    data = base_str.encode('utf-8') if isinstance(base_str, str) else base_str
    return hashlib.md5(data, usedforsecurity=False).hexdigest()
def get_sha256_str(base_str):
    """Returns string that represents sha256 hash of base_str (in hex format).

    sha1 and md5 are known to be breakable, so sha256 is a better option
    when the hash is being used for security purposes. If hashing passwords
    or anything else that needs to be retained for a long period a salted
    hash is better.
    """
    data = base_str.encode('utf-8') if isinstance(base_str, str) else base_str
    return hashlib.sha256(data).hexdigest()
def get_obj_repr_unicode(obj):
    """Returns a string representation of an object converted to unicode.

    In the case of python 3, this just returns the repr() of the object,
    else it converts the repr() to unicode.
    """
    return repr(obj)
def safe_truncate(value, length):
    """Safely truncates unicode strings such that their encoded length is
    no greater than the length provided.
    """
    b_value = encodeutils.safe_encode(value)[:length]

    # NOTE(chaochin) UTF-8 character byte size varies from 1 to 6. The
    # byte-level cut may have split a multi-byte sequence, so drop trailing
    # bytes one at a time until the result decodes cleanly.
    while True:
        try:
            return encodeutils.safe_decode(b_value)
        except UnicodeDecodeError:
            b_value = b_value[:-1]
def read_cached_file(filename, force_reload=False):
    """Read from a file if it has been modified.

    :param force_reload: Whether to reload the file.
    :returns: A tuple with a boolean specifying if the data is fresh
              or not.
    """
    global _FILE_CACHE

    if force_reload:
        delete_cached_file(filename)

    mtime = os.path.getmtime(filename)
    cache_info = _FILE_CACHE.setdefault(filename, {})

    # Reload when the entry is new or the file changed on disk.
    fresh = not cache_info or mtime > cache_info.get('mtime', 0)
    if fresh:
        LOG.debug("Reloading cached file %s", filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
    return (fresh, cache_info['data'])
def delete_cached_file(filename):
    """Delete cached file if present.

    :param filename: filename to delete
    """
    global _FILE_CACHE

    _FILE_CACHE.pop(filename, None)
def isotime(at=None):
    """Current time as ISO string,
    as timeutils.isotime() is deprecated

    :returns: Current time in ISO format
    """
    if not at:
        at = timeutils.utcnow()
    date_string = at.strftime("%Y-%m-%dT%H:%M:%S")
    tz_name = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    # UTC renders as the 'Z' suffix; other zones keep their tz name.
    if tz_name in ('UTC', 'UTC+00:00'):
        return date_string + 'Z'
    return date_string + tz_name
def strtime(at):
    """Format a datetime as an ISO-style string with microseconds."""
    return at.strftime("%Y-%m-%dT%H:%M:%S.%f")
def _get_conf_group(service_type):
    """Map a service type to the registered conf group holding its options.

    :raises: ConfGroupForServiceTypeNotFound when no group is registered.
    """
    # First try the project name from the service-types-authority.
    confgrp = _SERVICE_TYPES.get_project_name(service_type)
    if confgrp and hasattr(CONF, confgrp):
        return confgrp
    # Try the service type as the conf group. This is necessary for e.g.
    # placement, while it's still part of the nova project.
    # Note that this might become the first thing we try if/as we move to
    # using service types for conf group names in general.
    confgrp = service_type
    if confgrp and hasattr(CONF, confgrp):
        return confgrp
    raise exception.ConfGroupForServiceTypeNotFound(stype=service_type)
def _get_auth_and_session(confgrp, ksa_auth=None, ksa_session=None):
    """Ensure a ksa auth plugin and session exist for the given conf group.

    NOTE(efried): the returned auth could be None, and that could be okay -
    e.g. if the result is being used for get_endpoint() and the conf only
    contains endpoint_override.
    """
    if not ksa_auth:
        if ksa_session and ksa_session.auth:
            # Reuse the auth already attached to the supplied session.
            ksa_auth = ksa_session.auth
        else:
            ksa_auth = ks_loading.load_auth_from_conf_options(CONF, confgrp)

    if not ksa_session:
        ksa_session = ks_loading.load_session_from_conf_options(
            CONF, confgrp, auth=ksa_auth)

    return ksa_auth, ksa_session
def get_ksa_adapter(service_type, ksa_auth=None, ksa_session=None,
                    min_version=None, max_version=None):
    """Construct a keystoneauth1 Adapter for a given service type.

    We expect to find a conf group whose name corresponds to the service_type's
    project according to the service-types-authority.  That conf group must
    provide at least ksa adapter options.  Depending how the result is to be
    used, ksa auth and/or session options may also be required, or the
    relevant parameter supplied.

    A raise_exc=False adapter is returned, meaning responses >=400 return the
    Response object rather than raising an exception.  This behavior can be
    overridden on a per-request basis by setting raise_exc=True.

    :param service_type: String name of the service type for which the Adapter
                         is to be constructed.
    :param ksa_auth: A keystoneauth1 auth plugin. If not specified, we attempt
                     to find one in ksa_session.  Failing that, we attempt to
                     load one from the conf.
    :param ksa_session: A keystoneauth1 Session.  If not specified, we attempt
                        to load one from the conf.
    :param min_version: The minimum major version of the adapter's endpoint,
                        intended to be used as the lower bound of a range with
                        max_version.
                        If min_version is given with no max_version it is as
                        if max version is 'latest'.
    :param max_version: The maximum major version of the adapter's endpoint,
                        intended to be used as the upper bound of a range with
                        min_version.
    :return: A keystoneauth1 Adapter object for the specified service_type.
    :raise: ConfGroupForServiceTypeNotFound If no conf group name could be
            found for the specified service_type.
    """
    confgrp = _get_conf_group(service_type)

    ksa_auth, ksa_session = _get_auth_and_session(
        confgrp, ksa_auth, ksa_session)

    return ks_loading.load_adapter_from_conf_options(
        CONF, confgrp,
        session=ksa_session, auth=ksa_auth,
        min_version=min_version, max_version=max_version,
        raise_exc=False)
def get_sdk_adapter(
    service_type, admin, check_service=False, conf_group=None,
    context=None, **kwargs
):
    """Construct an openstacksdk-brokered Adapter for a given service type.

    We expect to find a conf group whose name corresponds to the service_type's
    project according to the service-types-authority.  That conf group must
    provide ksa auth, session, and adapter options.

    :param service_type: String name of the service type for which the Adapter
                         is to be constructed.
    :param admin: If set to true, the service will use Nova's service user
                  and password; otherwise, it will use the user's token.
    :param check_service: If True, we will query the endpoint to make sure the
        service is alive, raising ServiceUnavailable if it is not.
    :param conf_group: String name of the conf group to use, otherwise the
        name of the service_type will be used.
    :param context: Use to get user's token, if admin is set to False.
    :param kwargs: Additional arguments to pass to the Adapter constructor.
        Mainly used to pass microversion to a specific service,
        e.g. shared_file_system_api_version="2.82".
    :return: An openstack.proxy.Proxy object for the specified service_type.
    :raise: ConfGroupForServiceTypeNotFound If no conf group name could be
            found for the specified service_type.
    :raise: ServiceUnavailable if check_service is True and the service is
            down
    """
    confgrp = conf_group or _get_conf_group(service_type)
    try:
        if admin is False:
            # Non-admin mode requires a request context to supply the
            # user's token and project scoping.
            if context is None:
                raise ValueError(
                    "If admin is set to False then context cannot be None.")

            # NOTE(gibi): this is only needed to make sure
            # CONF.service_user.auth_url config is registered
            ks_loading.load_auth_from_conf_options(
                CONF, nova.conf.service_token.service_user.name)

            # Create a connection using the user's token instead of nova's
            # service user/pass.
            conn = connection.Connection(
                token=context.auth_token,
                auth_type="v3token",
                project_id=context.project_id,
                project_domain_id=context.project_domain_id,
                auth_url=CONF.service_user.auth_url,
                service_types={service_type},
                # strict_proxies makes the SDK probe the endpoint eagerly,
                # which is what implements check_service.
                strict_proxies=check_service,
                **kwargs,
            )
        else:
            # Create a connection based on nova's service user/pass
            sess = _get_auth_and_session(confgrp)[1]
            conn = connection.Connection(
                session=sess, oslo_conf=CONF, service_types={service_type},
                strict_proxies=check_service, **kwargs)
    except sdk_exc.ServiceDiscoveryException as e:
        raise exception.ServiceUnavailable(
            _("The %(service_type)s service is unavailable: %(error)s") %
            {'service_type': service_type, 'error': str(e)})
    # The replace('-', '_') below is to handle service names that use
    # hyphens and SDK attributes that use underscores.
    # e.g. service name --> sdk attribute
    # 'shared-file-system' --> 'shared_file_system'
    return getattr(conn, service_type.replace('-', '_'))
def get_endpoint(ksa_adapter):
    """Get the endpoint URL represented by a keystoneauth1 Adapter.

    This method is equivalent to what

        ksa_adapter.get_endpoint()

    should do, if it weren't for a panoply of bugs.

    :param ksa_adapter: keystoneauth1.adapter.Adapter, appropriately set up
                        with an endpoint_override; or service_type, interface
                        (list) and auth/service_catalog.
    :return: String endpoint URL.
    :raise EndpointNotFound: If endpoint discovery fails.
    """
    # TODO(efried): This will be unnecessary once bug #1707993 is fixed.
    # (At least for the non-image case, until 1707995 is fixed.)
    override = ksa_adapter.endpoint_override
    if override:
        return override

    # TODO(efried): Remove this once bug #1707995 is fixed.
    if ksa_adapter.service_type == 'image':
        # When ksa_adapter.auth is a _ContextAuthPlugin there is no
        # get_endpoint_data; swallow the AttributeError and fall through
        # to get_endpoint() below.
        with contextlib.suppress(AttributeError):
            return ksa_adapter.get_endpoint_data().catalog_url

    return ksa_adapter.get_endpoint()
def generate_hostid(host, project_id):
    """Generate an obfuscated host id representing the host.

    This is a hashed value so will not actually look like a hostname, and is
    hashed with data from the project_id.

    :param host: The name of the compute host.
    :param project_id: The UUID of the project.
    :return: An obfuscated hashed host id string, return "" if host is empty
    """
    if not host:
        return ""
    digest_input = (project_id + host).encode('utf-8')
    return hashlib.sha224(digest_input).hexdigest()
@contextlib.contextmanager
def nested_contexts(*contexts):
    """Enter all given context managers, yielding their entered values.

    The managers are exited in reverse order when the with-block ends.
    """
    with contextlib.ExitStack() as stack:
        entered = []
        for ctx in contexts:
            entered.append(stack.enter_context(ctx))
        yield entered
def normalize_rc_name(rc_name):
    """Normalize a resource class name to standard form."""
    if rc_name is None:
        return None
    # Collapse each run of non-alphanumeric characters into a single
    # underscore, then upper-case the result.
    # Bug #1762789: the .upper() must happen after the substitution.
    sanitized = re.sub('[^0-9A-Za-z]+', '_', rc_name).upper()
    return orc.CUSTOM_NAMESPACE + sanitized
def raise_if_old_compute():
    """Raise if the deployment contains a too-old nova-compute service.

    Looks up the minimum nova-compute service version -- across all cells
    when API database access is configured and permitted, otherwise in the
    local cell only -- and raises if that version is below the oldest
    supported one.

    :raise: exception.TooOldComputeService if the oldest nova-compute is
        older than the oldest supported service version.
    """
    # Imported here to avoid circular imports.
    from nova import context as nova_context
    from nova.objects import service

    ctxt = nova_context.get_admin_context()

    if CONF.api_database.connection is None:
        # We are in a cell so target our query to the current cell only.
        scope = 'cell'
        current_service_version = service.Service.get_minimum_version(
            ctxt, 'nova-compute')
    else:
        scope = 'system'
        try:
            current_service_version = service.get_minimum_version_all_cells(
                ctxt, ['nova-compute'])
        except exception.DBNotAllowed:
            # This most likely means we are in a nova-compute service
            # which is configured with a connection to the API database.
            # We should not be attempting to "get out" of our cell to look
            # at the minimum versions of nova-compute services in other
            # cells, so DBNotAllowed was raised. Leave a warning message
            # and fall back to only querying computes in our cell.
            LOG.warning(
                'This service is configured for access to the API database '
                'but is not allowed to directly access the database. You '
                'should run this service without the '
                '[api_database]/connection config option. The service version '
                'check will only query the local cell.')
            scope = 'cell'
            current_service_version = service.Service.get_minimum_version(
                ctxt, 'nova-compute')

    if current_service_version == 0:
        # 0 means there is no compute registered in the system yet --
        # probably a fresh install before the computes show up.
        return

    oldest_supported_service_level = service.SERVICE_VERSION_ALIASES[
        service.OLDEST_SUPPORTED_SERVICE_VERSION]

    if current_service_version < oldest_supported_service_level:
        raise exception.TooOldComputeService(
            oldest_supported_version=service.OLDEST_SUPPORTED_SERVICE_VERSION,
            scope=scope,
            min_service_level=current_service_version,
            oldest_supported_service=oldest_supported_service_level)
def run_once(message, logger, cleanup=None):
    """Decorate a function so it runs once and only once per interpreter.

    Note: this is copied from the placement repo (placement/util.py)

    The decorated function object can be reset by calling its
    reset function. All exceptions raised by the wrapped function,
    logger and cleanup function will be propagated to the caller.

    :param message: Message handed to ``logger`` on every call after the
        first one.
    :param logger: Callable invoked with ``message`` when the wrapped
        function is skipped because it already ran.
    :param cleanup: Optional callable that ``reset`` invokes when the
        wrapped function has previously been called.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if wrapper.called:
                # Already executed once; just report that we skipped.
                logger(message)
                return None
            # NOTE(sean-k-mooney): the called state is always updated, even
            # if the wrapped function completes by raising an exception. If
            # the caller catches the exception it is their responsibility to
            # call reset if they want to re-execute the wrapped function.
            try:
                return func(*args, **kwargs)
            finally:
                wrapper.called = True

        wrapper.called = False

        def _reset(wrapped, *args, **kwargs):
            # NOTE(sean-k-mooney): the cleanup function, when provided, only
            # runs if the wrapped function was previously called. Any
            # exception it raises is propagated, but the called state is
            # always cleared in the finally block so it is updated whenever
            # reset is called.
            try:
                if wrapped.called and cleanup:
                    return cleanup(*args, **kwargs)
            finally:
                wrapped.called = False

        wrapper.reset = functools.partial(_reset, wrapper)
        return wrapper
    return decorator
class _SentinelException(Exception):
    """Placeholder exception type used as a default sentinel; never raised."""
def latch_error_on_raise(retryable=(_SentinelException,)):
    """Decorator that makes a function permanently re-raise its first error.

    Once the wrapped function raises a non-retryable exception, every later
    call re-raises that same exception without invoking the wrapped function
    again. The latched error can be cleared via the decorated function's
    ``reset`` attribute.

    The only exception we know is safe to ignore is an oslo db connection
    error as the db may be temporarily unavailable and we should allow
    mod_wsgi to retry

    :param retryable: Tuple of exception types that are re-raised to the
        caller without being latched.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if wrapper.error:
                raise wrapper.error
            try:
                return func(*args, **kwargs)
            except retryable:
                # Pass retryable exceptions straight through so the caller
                # can handle them; do not latch.
                raise
            except Exception as exc:
                # Remember the first failure so later calls fail fast.
                wrapper.error = exc
                LOG.exception(exc)
                raise

        wrapper.error = None

        def _reset(wrapped):
            wrapped.error = None

        wrapper.reset = functools.partial(_reset, wrapper)
        return wrapper
    return decorator