Coverage for nova/objects/service.py: 99%
221 statements
coverage.py v7.6.12, created at 2025-04-17 15:08 +0000
1# Copyright 2013 IBM Corp.
2#
3# Licensed under the Apache License, Version 2.0 (the "License"); you may
4# not use this file except in compliance with the License. You may obtain
5# a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12# License for the specific language governing permissions and limitations
13# under the License.
15from oslo_log import log as logging
16from oslo_utils import uuidutils
17from oslo_utils import versionutils
19from nova import availability_zones
20from nova import context as nova_context
21from nova.db.main import api as db
22from nova import exception
23from nova.notifications.objects import base as notification
24from nova.notifications.objects import service as service_notification
25from nova import objects
26from nova.objects import base
27from nova.objects import fields
30LOG = logging.getLogger(__name__)
33# NOTE(danms): This is the global service version counter
34SERVICE_VERSION = 68
37# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
38# time we bump the version, we will put an entry here to record the change,
39# along with any pertinent data. For things that we can programmatically
40# detect that need a bump, we put something in _collect_things() below to
41# assemble a dict of things we can check. For example, we pretty much always
42# want to consider the compute RPC API version a thing that requires a service
43# bump so that we can drive version pins from it. We could include other
44# service RPC versions at some point, minimum object versions, etc.
45#
46# The TestServiceVersion test will fail if the calculated set of
47# things differs from the value in the last item of the list below,
48# indicating that a version bump is needed.
49#
50# Also note that there are other reasons we may want to bump this,
51# which will not be caught by the test. An example of this would be
52# triggering (or disabling) an online data migration once all services
53# in the cluster are at the same level.
54#
55# If a version bump is required for something mechanical, just document
56# that generic thing here (like compute RPC version bumps). No need to
57# replicate the details from compute/rpcapi.py here. However, for more
58# complex service interactions, extra detail should be provided.
59SERVICE_VERSION_HISTORY = (
60 # Version 0: Pre-history
61 {'compute_rpc': '4.0'},
63 # Version 1: Introduction of SERVICE_VERSION
64 {'compute_rpc': '4.4'},
65 # Version 2: Compute RPC version 4.5
66 {'compute_rpc': '4.5'},
67 # Version 3: Compute RPC version 4.6
68 {'compute_rpc': '4.6'},
69 # Version 4: Add PciDevice.parent_addr (data migration needed)
70 {'compute_rpc': '4.6'},
71 # Version 5: Compute RPC version 4.7
72 {'compute_rpc': '4.7'},
73 # Version 6: Compute RPC version 4.8
74 {'compute_rpc': '4.8'},
75 # Version 7: Compute RPC version 4.9
76 {'compute_rpc': '4.9'},
77 # Version 8: Compute RPC version 4.10
78 {'compute_rpc': '4.10'},
79 # Version 9: Compute RPC version 4.11
80 {'compute_rpc': '4.11'},
81 # Version 10: Compute node conversion to Inventories
82 {'compute_rpc': '4.11'},
83 # Version 11: Compute RPC version 4.12
84 {'compute_rpc': '4.12'},
85 # Version 12: The network APIs and compute manager support a NetworkRequest
86 # object where the network_id value is 'auto' or 'none'. BuildRequest
87 # objects are populated by nova-api during instance boot.
88 {'compute_rpc': '4.12'},
89 # Version 13: Compute RPC version 4.13
90 {'compute_rpc': '4.13'},
91 # Version 14: The compute manager supports setting device tags.
92 {'compute_rpc': '4.13'},
93 # Version 15: Indicate that nova-conductor will stop a boot if BuildRequest
94 # is deleted before RPC to nova-compute.
95 {'compute_rpc': '4.13'},
96 # Version 16: Indicate that nova-compute will refuse to start if it doesn't
97 # have a placement section configured.
98 {'compute_rpc': '4.13'},
99 # Version 17: Add 'reserve_volume' to the boot from volume flow and
100 # remove 'check_attach'. The service version bump is needed to fall back to
101 # the old check in the API as the old computes fail if the volume is moved
102 # to 'attaching' state by reserve.
103 {'compute_rpc': '4.13'},
104 # Version 18: Compute RPC version 4.14
105 {'compute_rpc': '4.14'},
106 # Version 19: Compute RPC version 4.15
107 {'compute_rpc': '4.15'},
108 # Version 20: Compute RPC version 4.16
109 {'compute_rpc': '4.16'},
110 # Version 21: Compute RPC version 4.17
111 {'compute_rpc': '4.17'},
112 # Version 22: A marker for the behaviour change of auto-healing code on the
113 # compute host regarding allocations against an instance
114 {'compute_rpc': '4.17'},
115 # Version 23: Compute hosts allow pre-creation of the migration object
116 # for cold migration.
117 {'compute_rpc': '4.18'},
118 # Version 24: Add support for Cinder v3 attach/detach API.
119 {'compute_rpc': '4.18'},
120 # Version 25: Compute hosts allow migration-based allocations
121 # for live migration.
122 {'compute_rpc': '4.18'},
123 # Version 26: Adds a 'host_list' parameter to build_and_run_instance()
124 {'compute_rpc': '4.19'},
125 # Version 27: Compute RPC version 4.20; adds multiattach argument to
126 # reserve_block_device_name().
127 {'compute_rpc': '4.20'},
128 # Version 28: Adds a 'host_list' parameter to prep_resize()
129 {'compute_rpc': '4.21'},
130 # Version 29: Compute RPC version 4.22
131 {'compute_rpc': '4.22'},
132 # Version 30: Compute RPC version 5.0
133 {'compute_rpc': '5.0'},
134 # Version 31: The compute manager checks if 'trusted_certs' are supported
135 {'compute_rpc': '5.0'},
136 # Version 32: Add 'file_backed_memory' support. The service version bump is
137 # needed to allow the destination of a live migration to reject the
138 # migration if 'file_backed_memory' is enabled and the source does not
139 # support 'file_backed_memory'
140 {'compute_rpc': '5.0'},
141 # Version 33: Add support for check on the server group with
142 # 'max_server_per_host' rules
143 {'compute_rpc': '5.0'},
144 # Version 34: Adds support to abort queued/preparing live migrations.
145 {'compute_rpc': '5.0'},
146 # Version 35: Indicates that nova-compute supports live migration with
147 # ports bound early on the destination host using VIFMigrateData.
148 {'compute_rpc': '5.0'},
149 # Version 36: Indicates that nova-compute supports specifying volume
150 # type when booting a volume-backed server.
151 {'compute_rpc': '5.0'},
152 # Version 37: prep_resize takes a RequestSpec object
153 {'compute_rpc': '5.1'},
154 # Version 38: set_host_enabled reflects COMPUTE_STATUS_DISABLED trait
155 {'compute_rpc': '5.1'},
156 # Version 39: resize_instance, finish_resize, revert_resize,
157 # finish_revert_resize, unshelve_instance takes a RequestSpec object
158 {'compute_rpc': '5.2'},
159 # Version 40: Add migration and limits parameters to
160 # check_can_live_migrate_destination(), new
161 # drop_move_claim_at_destination() method, and numa_live_migration
162 # parameter to check_can_live_migrate_source()
163 {'compute_rpc': '5.3'},
164 # Version 41: Add cache_images() to compute rpcapi (version 5.4)
165 {'compute_rpc': '5.4'},
166 # Version 42: Compute RPC version 5.5; +prep_snapshot_based_resize_at_dest
167 {'compute_rpc': '5.5'},
168 # Version 43: Compute RPC version 5.6: prep_snapshot_based_resize_at_source
169 {'compute_rpc': '5.6'},
170 # Version 44: Compute RPC version 5.7: finish_snapshot_based_resize_at_dest
171 {'compute_rpc': '5.7'},
172 # Version 45: Compute RPC v5.8: confirm_snapshot_based_resize_at_source
173 {'compute_rpc': '5.8'},
174 # Version 46: Compute RPC v5.9: revert_snapshot_based_resize_at_dest
175 {'compute_rpc': '5.9'},
176 # Version 47: Compute RPC v5.10:
177 # finish_revert_snapshot_based_resize_at_source
178 {'compute_rpc': '5.10'},
179 # Version 48: Drivers report COMPUTE_SAME_HOST_COLD_MIGRATE trait.
180 {'compute_rpc': '5.10'},
181 # Version 49: Compute now supports server move operations with qos ports
182 {'compute_rpc': '5.10'},
183 # Version 50: Compute RPC v5.11:
184 # Add accel_uuids (accelerator requests) param to build_and_run_instance
185 {'compute_rpc': '5.11'},
186 # Version 51: Add support for live migration with vpmem
187 {'compute_rpc': '5.11'},
188 # Version 52: Add support for the 'mixed' CPU allocation policy
189 {'compute_rpc': '5.11'},
190 # Version 53: Compute RPC v5.12:
191 # Add accel_uuids (accelerator requests) param to rebuild_instance
192 {'compute_rpc': '5.12'},
193 # Version 54: Compute RPC v5.13:
194 # Add accel_uuids (accelerator requests) param to shelve_instance and
195 # shelve_offload_instance and unshelve_instance
196 {'compute_rpc': '5.13'},
197 # Version 55: Compute RPC v5.13:
198 # Add support for qos interface attach
199 {'compute_rpc': '5.13'},
200 # Version 56: Compute RPC v6.0:
201 {'compute_rpc': '6.0'},
202 # Version 57: Compute RPC v6.0:
203 # Add support for vnic 'accelerator-direct'.
204 {'compute_rpc': '6.0'},
205 # Version 58: Compute RPC v6.0:
206 # Add support for booting with neutron extended resource request
207 {'compute_rpc': '6.0'},
208 # Version 59: Compute RPC v6.0:
209 # Add support for server move operations with neutron extended resource
210 # request
211 {'compute_rpc': '6.0'},
212 # Version 60: Compute RPC v6.0:
213 # Add support for interface attach operation with neutron extended resource
214 # request
215 {'compute_rpc': '6.0'},
216 # Version 61: Compute RPC v6.0:
217 # Add support for remotely-managed ports (vnic-type 'remote-managed')
218 {'compute_rpc': '6.0'},
219 # Version 62: Compute RPC v6.0:
220 # Add support for VDPA port attach/detach
221 {'compute_rpc': '6.0'},
222 # Version 63: Compute RPC v6.0:
223 # Add support for VDPA hotplug live migration and suspend/resume
224 {'compute_rpc': '6.0'},
225 # Version 64: Compute RPC v6.1:
226 # Add reimage_boot_volume parameter to rebuild_instance()
227 {'compute_rpc': '6.1'},
228 # Version 65: Compute RPC v6.1:
229 # Added stable local node identity
230 {'compute_rpc': '6.1'},
231 # Version 66: Compute RPC v6.2:
232 # Add target_state parameter to rebuild_instance()
233 {'compute_rpc': '6.2'},
234 # Version 67: Compute RPC v6.3:
235 # Add delete_attachment parameter to remove_volume_connection()
236 {'compute_rpc': '6.3'},
237 # Version 68: Compute RPC v6.4:
238 # Add support for shares
239 {'compute_rpc': '6.4'},
240)
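# NOTE: Rough usage illustration (ctxt stands for any admin RequestContext).
# The history above is what lets the compute RPC API derive an automatic
# version pin from the oldest service in the deployment, roughly:
#
#     minver = objects.Service.get_minimum_version(ctxt, 'nova-compute')
#     rpc_pin = SERVICE_VERSION_HISTORY[minver]['compute_rpc']
#
# which is why every SERVICE_VERSION bump needs a matching entry here, even
# when the compute RPC version itself did not change.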
242# This is the version after which we can rely on having a persistent
243# local node identity for single-node systems.
244NODE_IDENTITY_VERSION = 65
246# This is used to raise an error at service startup if older than supported
247# computes are detected.
248# NOTE(sbauza): Please modify it this way:
249# * At the beginning of a non-SLURP release (e.g. 2023.2 Bobcat) (or just after
250# the previous SLURP release RC1, like 2023.1 Antelope), please bump
251# OLDEST_SUPPORTED_SERVICE_VERSION to the previous SLURP release (in that
252# example, Antelope)
253# * At the beginning of a SLURP release (e.g. 2024.1 Caracal) (or just after the
254# previous non-SLURP release RC1, like 2023.2 Bobcat), please keep the
255# OLDEST_SUPPORTED_SERVICE_VERSION value at the previous SLURP release
256# (in that example, Antelope)
257# * At the end of any release (SLURP or non-SLURP), please modify
258# SERVICE_VERSION_ALIASES to add a key/value with the key being the release name
259# and the value being the latest service version that the release supports (for
260# example, before Bobcat RC1, please add 'Bobcat': XX where XX is the latest
261# service version that was added)
262OLDEST_SUPPORTED_SERVICE_VERSION = 'Caracal'
263SERVICE_VERSION_ALIASES = {
264 'Victoria': 52,
265 'Wallaby': 54,
266 'Xena': 57,
267 'Yoga': 61,
268 'Zed': 64,
269 'Antelope': 66,
270 'Bobcat': 66,
271 'Caracal': 66,
272 'Dalmatian': 67,
273 'Epoxy': 68,
274}
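# NOTE: Rough usage illustration (ctxt stands for any admin RequestContext).
# At startup these values gate against too-old computes, approximately:
#
#     oldest = SERVICE_VERSION_ALIASES[OLDEST_SUPPORTED_SERVICE_VERSION]
#     minver = get_minimum_version_all_cells(ctxt, ['nova-compute'])
#     if 0 < minver < oldest:
#         # refuse to start: a compute from before the oldest supported
#         # release is still registered
#         ...
#
# The actual check lives in nova.utils.raise_if_old_compute().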
277# TODO(berrange): Remove NovaObjectDictCompat
278@base.NovaObjectRegistry.register
279class Service(base.NovaPersistentObject, base.NovaObject,
280 base.NovaObjectDictCompat):
281 # Version 1.0: Initial version
282 # Version 1.1: Added compute_node nested object
283 # Version 1.2: String attributes updated to support unicode
284 # Version 1.3: ComputeNode version 1.5
285 # Version 1.4: Added use_slave to get_by_compute_host
286 # Version 1.5: ComputeNode version 1.6
287 # Version 1.6: ComputeNode version 1.7
288 # Version 1.7: ComputeNode version 1.8
289 # Version 1.8: ComputeNode version 1.9
290 # Version 1.9: ComputeNode version 1.10
291 # Version 1.10: Changes behaviour of loading compute_node
292 # Version 1.11: Added get_by_host_and_binary
293 # Version 1.12: ComputeNode version 1.11
294 # Version 1.13: Added last_seen_up
295 # Version 1.14: Added forced_down
296 # Version 1.15: ComputeNode version 1.12
297 # Version 1.16: Added version
298 # Version 1.17: ComputeNode version 1.13
299 # Version 1.18: ComputeNode version 1.14
300 # Version 1.19: Added get_minimum_version()
301 # Version 1.20: Added get_minimum_version_multi()
302 # Version 1.21: Added uuid
303 # Version 1.22: Added get_by_uuid()
304 VERSION = '1.22'
306 fields = {
307 'id': fields.IntegerField(read_only=True),
308 'uuid': fields.UUIDField(),
309 'host': fields.StringField(nullable=True),
310 'binary': fields.StringField(nullable=True),
311 'topic': fields.StringField(nullable=True),
312 'report_count': fields.IntegerField(),
313 'disabled': fields.BooleanField(),
314 'disabled_reason': fields.StringField(nullable=True),
315 'availability_zone': fields.StringField(nullable=True),
316 'compute_node': fields.ObjectField('ComputeNode'),
317 'last_seen_up': fields.DateTimeField(nullable=True),
318 'forced_down': fields.BooleanField(),
319 'version': fields.IntegerField(),
320 }
322 _MIN_VERSION_CACHE = {}
323 _SERVICE_VERSION_CACHING = False
325 def __init__(self, *args, **kwargs):
326 # NOTE(danms): We're going against the rules here and overriding
327 # init. The reason is that we want to *ensure* that we're always
328 # setting the current service version on our objects, overriding
329 # whatever else might be set in the database, or otherwise (which
330 # is the normal reason not to override init).
331 #
332 # We also need to do this here so that it's set on the client side
333 # all the time, such that create() and save() operations will
334 # include the current service version.
335 if 'version' in kwargs:
336 raise exception.ObjectActionError(
337 action='init',
338 reason='Version field is immutable')
340 super(Service, self).__init__(*args, **kwargs)
341 self.version = SERVICE_VERSION
343 def obj_make_compatible_from_manifest(self, primitive, target_version,
344 version_manifest):
345 super(Service, self).obj_make_compatible_from_manifest(
346 primitive, target_version, version_manifest)
347 _target_version = versionutils.convert_version_to_tuple(target_version)
348 if _target_version < (1, 21) and 'uuid' in primitive:
349 del primitive['uuid']
350 if _target_version < (1, 16) and 'version' in primitive:
351 del primitive['version']
352 if _target_version < (1, 14) and 'forced_down' in primitive:
353 del primitive['forced_down']
354 if _target_version < (1, 13) and 'last_seen_up' in primitive:
355 del primitive['last_seen_up']
356 if _target_version < (1, 10):
357 # service.compute_node was not lazy-loaded, we need to provide it
358 # when called
359 self._do_compute_node(self._context, primitive,
360 version_manifest)
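    # NOTE: Illustrative example: downgrading a primitive to target_version
    # '1.12' strips uuid, version, forced_down and last_seen_up, while only
    # targets older than 1.10 additionally get compute_node filled in via
    # _do_compute_node() below.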
362 def _do_compute_node(self, context, primitive, version_manifest):
363 try:
364 target_version = version_manifest['ComputeNode']
365 # NOTE(sbauza): Ironic deployments can have multiple
366 # nodes for the same service, but to keep the same behaviour
367 # we return only the first element of the list.
368 compute = objects.ComputeNodeList.get_all_by_host(
369 context, primitive['host'])[0]
370 except Exception:
371 return
372 primitive['compute_node'] = compute.obj_to_primitive(
373 target_version=target_version,
374 version_manifest=version_manifest)
376 @staticmethod
377 def _from_db_object(context, service, db_service):
378 allow_missing = ('availability_zone',)
379 for key in service.fields:
380 if key in allow_missing and key not in db_service:
381 continue
382 if key == 'compute_node':
383 # NOTE(sbauza): We only want to lazy-load compute_node.
384 continue
385 elif key == 'version':
386 # NOTE(danms): Special handling of the version field, since
387 # it is read_only and set in our init.
388 setattr(service, base.get_attrname(key), db_service[key])
389 elif key == 'uuid' and not db_service.get(key):
390 # Leave uuid off the object if undefined in the database
391 # so that it will be generated below.
392 continue
393 else:
394 service[key] = db_service[key]
396 service._context = context
397 service.obj_reset_changes()
399 return service
401 @base.lazy_load_counter
402 def obj_load_attr(self, attrname):
403 if not self._context:
404 raise exception.OrphanedObjectError(method='obj_load_attr',
405 objtype=self.obj_name())
407 LOG.debug("Lazy-loading '%(attr)s' on %(name)s id %(id)s",
408 {'attr': attrname,
409 'name': self.obj_name(),
410 'id': self.id,
411 })
412 if attrname != 'compute_node':
413 raise exception.ObjectActionError(
414 action='obj_load_attr',
415 reason='attribute %s not lazy-loadable' % attrname)
416 if self.binary == 'nova-compute':  [416 ↛ 423: the condition on line 416 was always true, so line 423 was never reached]
417 # Only n-cpu services have attached compute_node(s)
418 compute_nodes = objects.ComputeNodeList.get_all_by_host(
419 self._context, self.host)
420 else:
421 # NOTE(sbauza): Previous behaviour was to raise a ServiceNotFound;
422 # we keep it for backwards compatibility.
423 raise exception.ServiceNotFound(service_id=self.id)
424 # NOTE(sbauza): Ironic deployments can have multiple nodes
425 # for the same service, but to keep the same behaviour we return only
426 # the first element of the list.
427 self.compute_node = compute_nodes[0]
429 @base.remotable_classmethod
430 def get_by_id(cls, context, service_id):
431 db_service = db.service_get(context, service_id)
432 return cls._from_db_object(context, cls(), db_service)
434 @base.remotable_classmethod
435 def get_by_uuid(cls, context, service_uuid):
436 db_service = db.service_get_by_uuid(context, service_uuid)
437 return cls._from_db_object(context, cls(), db_service)
439 @base.remotable_classmethod
440 def get_by_host_and_topic(cls, context, host, topic):
441 db_service = db.service_get_by_host_and_topic(context, host, topic)
442 return cls._from_db_object(context, cls(), db_service)
444 @base.remotable_classmethod
445 def get_by_host_and_binary(cls, context, host, binary):
446 try:
447 db_service = db.service_get_by_host_and_binary(context,
448 host, binary)
449 except exception.HostBinaryNotFound:
450 return
451 return cls._from_db_object(context, cls(), db_service)
453 @staticmethod
454 @db.select_db_reader_mode
455 def _db_service_get_by_compute_host(context, host, use_slave=False):
456 return db.service_get_by_compute_host(context, host)
458 @base.remotable_classmethod
459 def get_by_compute_host(cls, context, host, use_slave=False):
460 db_service = cls._db_service_get_by_compute_host(context, host,
461 use_slave=use_slave)
462 return cls._from_db_object(context, cls(), db_service)
464 # NOTE(ndipanov): This is deprecated and should be removed on the next
465 # major version bump
466 @base.remotable_classmethod
467 def get_by_args(cls, context, host, binary):
468 db_service = db.service_get_by_host_and_binary(context, host, binary)
469 return cls._from_db_object(context, cls(), db_service)
471 def _check_minimum_version(self):
472 """Enforce that we are not older that the minimum version.
474 This is a loose check to avoid creating or updating our service
475 record if we would do so with a version that is older that the current
476 minimum of all services. This could happen if we were started with
477 older code by accident, either due to a rollback or an old and
478 un-updated node suddenly coming back onto the network.
480 There is technically a race here between the check and the update,
481 but since the minimum version should always roll forward and never
482 backwards, we don't need to worry about doing it atomically. Further,
483 the consequence for getting this wrong is minor, in that we'll just
484 fail to send messages that other services understand.
485 """
486 if not self.obj_attr_is_set('version'):  [486 ↛ 487: the condition on line 486 was never true, so line 487 was never reached]
487 return
488 if not self.obj_attr_is_set('binary'):
489 return
490 minver = self.get_minimum_version(self._context, self.binary)
491 if minver > self.version:
492 raise exception.ServiceTooOld(thisver=self.version,
493 minver=minver)
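    # NOTE: Illustrative example: if the deployment-wide minimum for this
    # binary is already 68 and this process was accidentally restarted with
    # code reporting version 66, the check above makes create() and save()
    # fail with ServiceTooOld rather than silently registering the stale
    # version.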
495 @base.remotable
496 def create(self):
497 if self.obj_attr_is_set('id'):
498 raise exception.ObjectActionError(action='create',
499 reason='already created')
500 self._check_minimum_version()
501 updates = self.obj_get_changes()
503 if 'uuid' not in updates:
504 updates['uuid'] = uuidutils.generate_uuid()
505 self.uuid = updates['uuid']
507 db_service = db.service_create(self._context, updates)
508 self._from_db_object(self._context, self, db_service)
509 self._send_notification(fields.NotificationAction.CREATE)
511 @base.remotable
512 def save(self):
513 updates = self.obj_get_changes()
514 updates.pop('id', None)
515 self._check_minimum_version()
516 db_service = db.service_update(self._context, self.id, updates)
517 self._from_db_object(self._context, self, db_service)
519 self._send_status_update_notification(updates)
521 def _send_status_update_notification(self, updates):
522 # Note(gibi): We do not trigger a notification on the version field, as it
523 # is always dirty; doing so would cause nova to send a notification on
524 # every other field change. See the comment in save() too.
525 if set(updates.keys()).intersection(
526 {'disabled', 'disabled_reason', 'forced_down'}):
527 self._send_notification(fields.NotificationAction.UPDATE)
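    # NOTE: Illustrative example: a save() after setting forced_down=True (or
    # toggling disabled/disabled_reason) emits a service.update versioned
    # notification, while a save() that only bumps report_count does not,
    # because of the field filter above.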
529 def _send_notification(self, action):
530 payload = service_notification.ServiceStatusPayload(self)
531 service_notification.ServiceStatusNotification(
532 publisher=notification.NotificationPublisher.from_service_obj(
533 self),
534 event_type=notification.EventType(
535 object='service',
536 action=action),
537 priority=fields.NotificationPriority.INFO,
538 payload=payload).emit(self._context)
540 @base.remotable
541 def destroy(self):
542 db.service_destroy(self._context, self.id)
543 self._send_notification(fields.NotificationAction.DELETE)
545 @classmethod
546 def enable_min_version_cache(cls):
547 cls.clear_min_version_cache()
548 cls._SERVICE_VERSION_CACHING = True
550 @classmethod
551 def clear_min_version_cache(cls):
552 cls._MIN_VERSION_CACHE = {}
554 @staticmethod
555 @db.select_db_reader_mode
556 def _db_service_get_minimum_version(context, binaries, use_slave=False):
557 return db.service_get_minimum_version(context, binaries)
559 @base.remotable_classmethod
560 def get_minimum_version_multi(cls, context, binaries, use_slave=False):
561 if not all(binary.startswith('nova-') for binary in binaries):
562 LOG.warning('get_minimum_version called with likely-incorrect '
563 'binaries `%s\'', ','.join(binaries))
564 raise exception.ObjectActionError(action='get_minimum_version',
565 reason='Invalid binary prefix')
567 if (not cls._SERVICE_VERSION_CACHING or
568 any(binary not in cls._MIN_VERSION_CACHE
569 for binary in binaries)):
570 min_versions = cls._db_service_get_minimum_version(
571 context, binaries, use_slave=use_slave)
572 if min_versions:
573 min_versions = {binary: version or 0
574 for binary, version in
575 min_versions.items()}
576 cls._MIN_VERSION_CACHE.update(min_versions)
577 else:
578 min_versions = {binary: cls._MIN_VERSION_CACHE[binary]
579 for binary in binaries}
581 if min_versions:
582 version = min(min_versions.values())
583 else:
584 version = 0
585 # NOTE(danms): Since our return value is not controlled by object
586 # schema, be explicit here.
587 version = int(version)
589 return version
591 @base.remotable_classmethod
592 def get_minimum_version(cls, context, binary, use_slave=False):
593 return cls.get_minimum_version_multi(context, [binary],
594 use_slave=use_slave)
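    # NOTE: Rough usage illustration (ctxt stands for any RequestContext).
    # Long-running services that poll the minimum version can opt into the
    # class-level cache:
    #
    #     Service.enable_min_version_cache()
    #     minver = Service.get_minimum_version(ctxt, 'nova-compute')
    #
    # clear_min_version_cache() drops the cached values so the next call hits
    # the database again, e.g. after computes have been upgraded.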
597def get_minimum_version_all_cells(context, binaries, require_all=False):
598 """Get the minimum service version, checking all cells.
600 This attempts to calculate the minimum service version for a set
601 of binaries across all the cells in the system. If require_all
602 is False, then any cells that fail to report a version will be
603 ignored (assuming they won't be candidates for scheduling and thus
604 excluding them from the minimum version calculation is reasonable).
605 If require_all is True, then a failing cell will cause this to raise
606 exception.CellTimeout, as would be appropriate for gating some
607 data migration until everything is new enough.
609 Note that services that do not report a positive version are excluded
610 from this, since the calculation crosses all cells, which will naturally
611 not all run every service.
612 """
614 if not all(binary.startswith('nova-') for binary in binaries):
615 LOG.warning('get_minimum_version_all_cells called with '
616 'likely-incorrect binaries `%s\'', ','.join(binaries))
617 raise exception.ObjectActionError(
618 action='get_minimum_version_all_cells',
619 reason='Invalid binary prefix')
621 # NOTE(danms): Instead of using Service.get_minimum_version_multi(), we
622 # replicate the call directly to the underlying DB method here because
623 # we want to defeat the caching and we need to filter non-present
624 # services differently from the single-cell method.
626 results = nova_context.scatter_gather_all_cells(
627 context,
628 Service._db_service_get_minimum_version,
629 binaries)
631 min_version = None
632 for cell_uuid, result in results.items():
633 if result is nova_context.did_not_respond_sentinel:
634 LOG.warning('Cell %s did not respond when getting minimum '
635 'service version', cell_uuid)
636 if require_all:
637 raise exception.CellTimeout()
638 elif isinstance(result, Exception):
639 LOG.warning('Failed to get minimum service version for cell %s',
640 cell_uuid)
641 if require_all:
642 # NOTE(danms): Okay, this isn't necessarily a timeout, but
643 # it's functionally the same from the caller's perspective
644 # and we logged the fact that it was actually a failure
645 # for the forensic investigator during the scatter/gather
646 # routine.
647 raise exception.CellTimeout()
648 else:
649 # NOTE(danms): Don't consider a zero or None result as the minimum
650 # since we're crossing cells and will likely not have all the
651 # services being probed.
652 relevant_versions = [version for version in result.values()
653 if version]
654 if relevant_versions:
655 min_version_cell = min(relevant_versions)
656 min_version = (min(min_version, min_version_cell)
657 if min_version else min_version_cell)
659 # NOTE(danms): If we got no matches at all (such as at first startup)
660 # then report that as zero to be consistent with the other such
661 # methods.
662 return min_version or 0
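# NOTE: Illustrative example: with two cells reporting {'nova-compute': 66}
# and {'nova-compute': 68} and a third cell failing to respond,
# get_minimum_version_all_cells(ctxt, ['nova-compute']) returns 66 when
# require_all=False and raises CellTimeout when require_all=True.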
665@base.NovaObjectRegistry.register
666class ServiceList(base.ObjectListBase, base.NovaObject):
667 # Version 1.0: Initial version
668 # Service <= version 1.2
669 # Version 1.1 Service version 1.3
670 # Version 1.2: Service version 1.4
671 # Version 1.3: Service version 1.5
672 # Version 1.4: Service version 1.6
673 # Version 1.5: Service version 1.7
674 # Version 1.6: Service version 1.8
675 # Version 1.7: Service version 1.9
676 # Version 1.8: Service version 1.10
677 # Version 1.9: Added get_by_binary() and Service version 1.11
678 # Version 1.10: Service version 1.12
679 # Version 1.11: Service version 1.13
680 # Version 1.12: Service version 1.14
681 # Version 1.13: Service version 1.15
682 # Version 1.14: Service version 1.16
683 # Version 1.15: Service version 1.17
684 # Version 1.16: Service version 1.18
685 # Version 1.17: Service version 1.19
686 # Version 1.18: Added include_disabled parameter to get_by_binary()
687 # Version 1.19: Added get_all_computes_by_hv_type()
688 VERSION = '1.19'
690 fields = {
691 'objects': fields.ListOfObjectsField('Service'),
692 }
694 @base.remotable_classmethod
695 def get_by_topic(cls, context, topic):
696 db_services = db.service_get_all_by_topic(context, topic)
697 return base.obj_make_list(context, cls(context), objects.Service,
698 db_services)
700 # NOTE(paul-carlton2): In v2.0 of the object the include_disabled flag
701 # will be removed so both enabled and disabled hosts are returned
702 @base.remotable_classmethod
703 def get_by_binary(cls, context, binary, include_disabled=False):
704 db_services = db.service_get_all_by_binary(
705 context, binary, include_disabled=include_disabled)
706 return base.obj_make_list(context, cls(context), objects.Service,
707 db_services)
709 @base.remotable_classmethod
710 def get_by_host(cls, context, host):
711 db_services = db.service_get_all_by_host(context, host)
712 return base.obj_make_list(context, cls(context), objects.Service,
713 db_services)
715 @base.remotable_classmethod
716 def get_all(cls, context, disabled=None, set_zones=False):
717 db_services = db.service_get_all(context, disabled=disabled)
718 if set_zones:
719 db_services = availability_zones.set_availability_zones(
720 context, db_services)
721 return base.obj_make_list(context, cls(context), objects.Service,
722 db_services)
724 @base.remotable_classmethod
725 def get_all_computes_by_hv_type(cls, context, hv_type):
726 db_services = db.service_get_all_computes_by_hv_type(
727 context, hv_type, include_disabled=False)
728 return base.obj_make_list(context, cls(context), objects.Service,
729 db_services)