Coverage for nova/objects/instance.py: 92%

814 statements  

« prev     ^ index     » next       coverage.py v7.6.12, created at 2025-04-17 15:08 +0000

1# Copyright 2013 IBM Corp. 

2# 

3# Licensed under the Apache License, Version 2.0 (the "License"); you may 

4# not use this file except in compliance with the License. You may obtain 

5# a copy of the License at 

6# 

7# http://www.apache.org/licenses/LICENSE-2.0 

8# 

9# Unless required by applicable law or agreed to in writing, software 

10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 

11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 

12# License for the specific language governing permissions and limitations 

13# under the License. 

14 

15import contextlib 

16import typing as ty 

17 

18from oslo_config import cfg 

19from oslo_db import exception as db_exc 

20from oslo_log import log as logging 

21from oslo_serialization import jsonutils 

22from oslo_utils import timeutils 

23from oslo_utils import versionutils 

24import sqlalchemy as sa 

25from sqlalchemy import sql 

26from sqlalchemy.sql import func 

27 

28from nova import availability_zones as avail_zone 

29from nova.compute import task_states 

30from nova.compute import vm_states 

31from nova import context as nova_context 

32from nova.db.main import api as db 

33from nova.db.main import models 

34from nova import exception 

35from nova.i18n import _ 

36from nova.network import model as network_model 

37from nova import notifications 

38from nova import objects 

39from nova.objects import base 

40from nova.objects import fields 

41from nova import utils 

42 

43 

44CONF = cfg.CONF 

45LOG = logging.getLogger(__name__) 

46 

47 

48# List of fields that can be joined in DB layer. 

49_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata', 

50 'info_cache', 'security_groups', 

51 'pci_devices', 'tags', 'services', 

52 'fault'] 

53# These are fields that are optional but don't translate to db columns 

54_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['flavor', 'old_flavor', 

55 'new_flavor', 'ec2_ids'] 

56# These are fields that are optional and in instance_extra 

57_INSTANCE_EXTRA_FIELDS = ['numa_topology', 'pci_requests', 

58 'flavor', 'vcpu_model', 'migration_context', 

59 'keypairs', 'device_metadata', 'trusted_certs', 

60 'resources'] 

61# These are fields that applied/drooped by migration_context 

62_MIGRATION_CONTEXT_ATTRS = ['numa_topology', 'pci_requests', 

63 'pci_devices', 'resources'] 

64 

65# These are fields that can be specified as expected_attrs 

66INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS + 

67 _INSTANCE_OPTIONAL_NON_COLUMN_FIELDS + 

68 _INSTANCE_EXTRA_FIELDS) 

69# These are fields that most query calls load by default 

70INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata', 

71 'info_cache', 'security_groups'] 

72 

73# Maximum count of tags to one instance 

74MAX_TAG_COUNT = 50 

75 

76 

def _expected_cols(expected_attrs):
    """Return expected_attrs that are columns needing joining.

    NB: This function may modify expected_attrs if one
    requested attribute requires another.

    :param expected_attrs: list of attribute names the caller wants loaded
    :returns: de-duplicated, order-preserving list of column names to join
        in the DB layer; instance_extra fields are rewritten as
        ``extra.<field>`` entries
    """
    if not expected_attrs:
        return expected_attrs

    simple_cols = [attr for attr in expected_attrs
                   if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS]

    complex_cols = ['extra.%s' % field
                    for field in _INSTANCE_EXTRA_FIELDS
                    if field in expected_attrs]
    if complex_cols:
        # Any instance_extra field requires joining the 'extra' table.
        simple_cols.append('extra')
    simple_cols = [x for x in simple_cols if x not in _INSTANCE_EXTRA_FIELDS]
    expected_cols = simple_cols + complex_cols
    # NOTE(pumaranikar): expected_cols can contain duplicates since the
    # caller appends column attributes to expected_attrs without checking
    # if they are already present in the list or not. Remove duplicates
    # while keeping first-seen (insertion) order. dict.fromkeys preserves
    # insertion order and is O(n), unlike the previous
    # sorted(set(...), key=list.index) which was O(n^2).
    return list(dict.fromkeys(expected_cols))

102 

103 

# Sentinel used to distinguish "no value loaded" from a legitimately
# stored None.
_NO_DATA_SENTINEL = object()

105 

106 

# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class Instance(base.NovaPersistentObject, base.NovaObject,
               base.NovaObjectDictCompat):
    """Versioned object representing a compute instance and its DB record."""

    # Version 2.0: Initial version
    # Version 2.1: Added services
    # Version 2.2: Added keypairs
    # Version 2.3: Added device_metadata
    # Version 2.4: Added trusted_certs
    # Version 2.5: Added hard_delete kwarg in destroy
    # Version 2.6: Added hidden
    # Version 2.7: Added resources
    # Version 2.8: Added compute_id
    VERSION = '2.8'

    fields = {
        'id': fields.IntegerField(),

        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),

        'image_ref': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'hostname': fields.StringField(nullable=True),

        'launch_index': fields.IntegerField(nullable=True),
        'key_name': fields.StringField(nullable=True),
        'key_data': fields.StringField(nullable=True),

        'power_state': fields.IntegerField(nullable=True),
        'vm_state': fields.StringField(nullable=True),
        'task_state': fields.StringField(nullable=True),

        'services': fields.ObjectField('ServiceList'),

        'memory_mb': fields.IntegerField(nullable=True),
        'vcpus': fields.IntegerField(nullable=True),
        'root_gb': fields.IntegerField(nullable=True),
        'ephemeral_gb': fields.IntegerField(nullable=True),
        'ephemeral_key_uuid': fields.UUIDField(nullable=True),

        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'compute_id': fields.IntegerField(nullable=True),

        # TODO(stephenfin): Remove this in version 3.0 of the object as it has
        # been replaced by 'flavor'
        'instance_type_id': fields.IntegerField(nullable=True),

        'user_data': fields.StringField(nullable=True),

        'reservation_id': fields.StringField(nullable=True),

        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),

        'availability_zone': fields.StringField(nullable=True),

        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),

        'launched_on': fields.StringField(nullable=True),

        'locked': fields.BooleanField(default=False),
        'locked_by': fields.StringField(nullable=True),

        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'vm_mode': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),

        'root_device_name': fields.StringField(nullable=True),
        'default_ephemeral_device': fields.StringField(nullable=True),
        'default_swap_device': fields.StringField(nullable=True),
        'config_drive': fields.StringField(nullable=True),

        'access_ip_v4': fields.IPV4AddressField(nullable=True),
        'access_ip_v6': fields.IPV6AddressField(nullable=True),

        'auto_disk_config': fields.BooleanField(default=False),
        'progress': fields.IntegerField(nullable=True),

        'shutdown_terminate': fields.BooleanField(default=False),
        'disable_terminate': fields.BooleanField(default=False),

        # TODO(stephenfin): Remove this in version 3.0 of the object as it's
        # related to cells v1
        'cell_name': fields.StringField(nullable=True),

        'metadata': fields.DictOfStringsField(),
        'system_metadata': fields.DictOfNullableStringsField(),

        'info_cache': fields.ObjectField('InstanceInfoCache',
                                         nullable=True),

        # TODO(stephenfin): Remove this in version 3.0 of the object as it's
        # related to nova-network
        'security_groups': fields.ObjectField('SecurityGroupList'),

        'fault': fields.ObjectField('InstanceFault', nullable=True),

        'cleaned': fields.BooleanField(default=False),

        'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
        'numa_topology': fields.ObjectField('InstanceNUMATopology',
                                            nullable=True),
        'pci_requests': fields.ObjectField('InstancePCIRequests',
                                           nullable=True),
        'device_metadata': fields.ObjectField('InstanceDeviceMetadata',
                                              nullable=True),
        'tags': fields.ObjectField('TagList'),
        'flavor': fields.ObjectField('Flavor'),
        'old_flavor': fields.ObjectField('Flavor', nullable=True),
        'new_flavor': fields.ObjectField('Flavor', nullable=True),
        'vcpu_model': fields.ObjectField('VirtCPUModel', nullable=True),
        'ec2_ids': fields.ObjectField('EC2Ids'),
        'migration_context': fields.ObjectField('MigrationContext',
                                                nullable=True),
        'keypairs': fields.ObjectField('KeyPairList'),
        'trusted_certs': fields.ObjectField('TrustedCerts', nullable=True),
        'hidden': fields.BooleanField(default=False),
        'resources': fields.ObjectField('ResourceList', nullable=True),
    }

    # 'name' is computed (see the name property) but exposed like a field.
    obj_extra_fields = ['name']

233 

234 def obj_make_compatible(self, primitive, target_version): 

235 super(Instance, self).obj_make_compatible(primitive, target_version) 

236 target_version = versionutils.convert_version_to_tuple(target_version) 

237 if target_version < (2, 8) and 'compute_id' in primitive: 

238 del primitive['compute_id'] 

239 if target_version < (2, 7) and 'resources' in primitive: 239 ↛ 240line 239 didn't jump to line 240 because the condition on line 239 was never true

240 del primitive['resources'] 

241 if target_version < (2, 6) and 'hidden' in primitive: 

242 del primitive['hidden'] 

243 if target_version < (2, 4) and 'trusted_certs' in primitive: 243 ↛ 244line 243 didn't jump to line 244 because the condition on line 243 was never true

244 del primitive['trusted_certs'] 

245 if target_version < (2, 3) and 'device_metadata' in primitive: 245 ↛ 246line 245 didn't jump to line 246 because the condition on line 245 was never true

246 del primitive['device_metadata'] 

247 if target_version < (2, 2) and 'keypairs' in primitive: 247 ↛ 248line 247 didn't jump to line 248 because the condition on line 247 was never true

248 del primitive['keypairs'] 

249 if target_version < (2, 1) and 'services' in primitive: 249 ↛ 250line 249 didn't jump to line 250 because the condition on line 249 was never true

250 del primitive['services'] 

251 

    def __init__(self, *args, **kwargs):
        """Initialize the object and snapshot metadata for change tracking."""
        super(Instance, self).__init__(*args, **kwargs)
        # Record the initial (system_)metadata so in-place dict mutations
        # can later be detected by obj_what_changed().
        self._reset_metadata_tracking()

255 

    @property
    def image_meta(self):
        """Image metadata derived from this instance (ImageMeta object)."""
        return objects.ImageMeta.from_instance(self)

259 

260 def _reset_metadata_tracking(self, fields=None): 

261 if fields is None or 'system_metadata' in fields: 

262 self._orig_system_metadata = (dict(self.system_metadata) if 

263 'system_metadata' in self else {}) 

264 if fields is None or 'metadata' in fields: 

265 self._orig_metadata = (dict(self.metadata) if 

266 'metadata' in self else {}) 

267 

268 def obj_clone(self): 

269 """Create a copy of this instance object.""" 

270 nobj = super(Instance, self).obj_clone() 

271 # Since the base object only does a deep copy of the defined fields, 

272 # need to make sure to also copy the additional tracking metadata 

273 # attributes so they don't show as changed and cause the metadata 

274 # to always be updated even when stale information. 

275 if hasattr(self, '_orig_metadata'): 275 ↛ 277line 275 didn't jump to line 277 because the condition on line 275 was always true

276 nobj._orig_metadata = dict(self._orig_metadata) 

277 if hasattr(self, '_orig_system_metadata'): 277 ↛ 279line 277 didn't jump to line 279 because the condition on line 277 was always true

278 nobj._orig_system_metadata = dict(self._orig_system_metadata) 

279 return nobj 

280 

    def obj_reset_changes(self, fields=None, recursive=False):
        """Reset change tracking and re-snapshot metadata dicts."""
        super(Instance, self).obj_reset_changes(fields,
                                                recursive=recursive)
        # Keep the metadata snapshots in sync with the newly-clean state.
        self._reset_metadata_tracking(fields=fields)

285 

286 def obj_what_changed(self): 

287 changes = super(Instance, self).obj_what_changed() 

288 if 'metadata' in self and self.metadata != self._orig_metadata: 

289 changes.add('metadata') 

290 if 'system_metadata' in self and (self.system_metadata != 

291 self._orig_system_metadata): 

292 changes.add('system_metadata') 

293 return changes 

294 

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        """Deserialize and re-arm metadata change tracking on the result."""
        self = super(Instance, cls)._obj_from_primitive(context, objver,
                                                        primitive)
        # A freshly deserialized object starts clean; snapshot the
        # metadata dicts so subsequent mutations are detected.
        self._reset_metadata_tracking()
        return self

301 

    @property
    def name(self):
        """Instance display name rendered from CONF.instance_name_template.

        Falls back to self.uuid when the template cannot be satisfied, or
        to '' when the instance has not yet been persisted (still being
        scheduled).
        """
        try:
            # Simple templates interpolate the integer id directly.
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                if key == 'name':
                    # NOTE(danms): prevent recursion
                    continue
                elif not self.obj_attr_is_set(key):
                    # NOTE(danms): Don't trigger lazy-loads
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        except (exception.ObjectActionError,
                exception.OrphanedObjectError):
            # This indicates self.id was not set and/or could not be
            # lazy loaded. What this means is the instance has not
            # been persisted to a db yet, which should indicate it has
            # not been scheduled yet. In this situation it will have a
            # blank name.
            if (self.vm_state == vm_states.BUILDING and
                    self.task_state == task_states.SCHEDULING):
                base_name = ''
            else:
                # If the vm/task states don't indicate that it's being booted
                # then we have a bug here. Log an error and attempt to return
                # the uuid which is what an error above would return.
                LOG.error('Could not lazy-load instance.id while '
                          'attempting to generate the instance name.')
                base_name = self.uuid
        return base_name

341 

342 def _flavor_from_db(self, db_flavor): 

343 """Load instance flavor information from instance_extra.""" 

344 

345 # Before we stored flavors in instance_extra, certain fields, defined 

346 # in nova.compute.flavors.system_metadata_flavor_props, were stored 

347 # in the instance.system_metadata for the embedded instance.flavor. 

348 # The "disabled" and "is_public" fields weren't one of those keys, 

349 # however, so really old instances that had their embedded flavor 

350 # converted to the serialized instance_extra form won't have the 

351 # disabled attribute set and we need to default those here so callers 

352 # don't explode trying to load instance.flavor.disabled. 

353 def _default_flavor_values(flavor): 

354 if 'disabled' not in flavor: 

355 flavor.disabled = False 

356 if 'is_public' not in flavor: 

357 flavor.is_public = True 

358 

359 flavor_info = jsonutils.loads(db_flavor) 

360 

361 self.flavor = objects.Flavor.obj_from_primitive(flavor_info['cur']) 

362 _default_flavor_values(self.flavor) 

363 if flavor_info['old']: 

364 self.old_flavor = objects.Flavor.obj_from_primitive( 

365 flavor_info['old']) 

366 _default_flavor_values(self.old_flavor) 

367 else: 

368 self.old_flavor = None 

369 if flavor_info['new']: 

370 self.new_flavor = objects.Flavor.obj_from_primitive( 

371 flavor_info['new']) 

372 _default_flavor_values(self.new_flavor) 

373 else: 

374 self.new_flavor = None 

375 self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor']) 

376 

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.

        :param context: request context stored on the resulting object
        :param instance: Instance object to populate in place
        :param db_inst: DB row (or dict) for the instance
        :param expected_attrs: optional attribute names to hydrate beyond
            the plain columns
        :returns: the populated ``instance``
        """
        instance._context = context
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
            elif field == 'deleted':
                # DB soft-delete convention: 'deleted' column equals the row
                # id when deleted, 0 otherwise.
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                # Stored as an integer flag in the DB; expose as bool.
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (
                objects.InstanceFault.get_latest_for_instance(
                    context, instance.uuid))
        if 'ec2_ids' in expected_attrs:
            instance._load_ec2_ids()
        if 'info_cache' in expected_attrs:
            if db_inst.get('info_cache') is None:
                instance.info_cache = None
            elif not instance.obj_attr_is_set('info_cache'):
                # TODO(danms): If this ever happens on a backlevel instance
                # passed to us by a backlevel service, things will break
                instance.info_cache = objects.InstanceInfoCache(context)
            if instance.info_cache is not None:
                instance.info_cache._from_db_object(context,
                                                    instance.info_cache,
                                                    db_inst['info_cache'])

        # TODO(danms): If we are updating these on a backlevel instance,
        # we'll end up sending back new versions of these objects (see
        # above note for new info_caches
        if 'pci_devices' in expected_attrs:
            pci_devices = base.obj_make_list(
                    context, objects.PciDeviceList(context),
                    objects.PciDevice, db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices

        # TODO(stephenfin): Remove this as it's related to nova-network
        if 'security_groups' in expected_attrs:
            # Always an empty list now; kept only for API compatibility.
            sec_groups = base.obj_make_list(
                    context, objects.SecurityGroupList(context),
                    objects.SecurityGroup, [])
            instance['security_groups'] = sec_groups

        if 'tags' in expected_attrs:
            tags = base.obj_make_list(
                context, objects.TagList(context),
                objects.Tag, db_inst['tags'])
            instance['tags'] = tags

        if 'services' in expected_attrs:
            services = base.obj_make_list(
                    context, objects.ServiceList(context),
                    objects.Service, db_inst['services'])
            instance['services'] = services

        # Fields stored in the instance_extra table are handled separately.
        instance._extra_attributes_from_db_object(instance, db_inst,
                                                  expected_attrs)

        instance.obj_reset_changes()
        return instance

452 

453 @staticmethod 

454 def _extra_attributes_from_db_object(instance, db_inst, 

455 expected_attrs=None): 

456 """Method to help with migration of extra attributes to objects. 

457 """ 

458 if expected_attrs is None: 458 ↛ 459line 458 didn't jump to line 459 because the condition on line 458 was never true

459 expected_attrs = [] 

460 # NOTE(danms): We can be called with a dict instead of a 

461 # SQLAlchemy object, so we have to be careful here 

462 if hasattr(db_inst, '__dict__'): 

463 have_extra = 'extra' in db_inst.__dict__ and db_inst['extra'] 

464 else: 

465 have_extra = 'extra' in db_inst and db_inst['extra'] 

466 

467 if 'numa_topology' in expected_attrs: 

468 if have_extra: 468 ↛ 472line 468 didn't jump to line 472 because the condition on line 468 was always true

469 instance._load_numa_topology( 

470 db_inst['extra'].get('numa_topology')) 

471 else: 

472 instance.numa_topology = None 

473 if 'pci_requests' in expected_attrs: 

474 if have_extra: 474 ↛ 478line 474 didn't jump to line 478 because the condition on line 474 was always true

475 instance._load_pci_requests( 

476 db_inst['extra'].get('pci_requests')) 

477 else: 

478 instance.pci_requests = None 

479 if 'device_metadata' in expected_attrs: 

480 if have_extra: 480 ↛ 484line 480 didn't jump to line 484 because the condition on line 480 was always true

481 instance._load_device_metadata( 

482 db_inst['extra'].get('device_metadata')) 

483 else: 

484 instance.device_metadata = None 

485 if 'vcpu_model' in expected_attrs: 

486 if have_extra: 486 ↛ 490line 486 didn't jump to line 490 because the condition on line 486 was always true

487 instance._load_vcpu_model( 

488 db_inst['extra'].get('vcpu_model')) 

489 else: 

490 instance.vcpu_model = None 

491 if 'migration_context' in expected_attrs: 

492 if have_extra: 492 ↛ 496line 492 didn't jump to line 496 because the condition on line 492 was always true

493 instance._load_migration_context( 

494 db_inst['extra'].get('migration_context')) 

495 else: 

496 instance.migration_context = None 

497 if 'keypairs' in expected_attrs: 

498 if have_extra: 498 ↛ 500line 498 didn't jump to line 500 because the condition on line 498 was always true

499 instance._load_keypairs(db_inst['extra'].get('keypairs')) 

500 if 'trusted_certs' in expected_attrs: 

501 if have_extra: 501 ↛ 505line 501 didn't jump to line 505 because the condition on line 501 was always true

502 instance._load_trusted_certs( 

503 db_inst['extra'].get('trusted_certs')) 

504 else: 

505 instance.trusted_certs = None 

506 if 'resources' in expected_attrs: 

507 if have_extra: 507 ↛ 511line 507 didn't jump to line 511 because the condition on line 507 was always true

508 instance._load_resources( 

509 db_inst['extra'].get('resources')) 

510 else: 

511 instance.resources = None 

512 if any([x in expected_attrs for x in ('flavor', 

513 'old_flavor', 

514 'new_flavor')]): 

515 if have_extra and db_inst['extra'].get('flavor'): 

516 instance._flavor_from_db(db_inst['extra']['flavor']) 

517 

    @staticmethod
    @db.select_db_reader_mode
    def _db_instance_get_by_uuid(context, uuid, columns_to_join,
                                 use_slave=False):
        """Fetch the DB row for an instance by uuid.

        The select_db_reader_mode decorator consumes ``use_slave`` to pick
        the (a)sync DB reader.
        """
        return db.instance_get_by_uuid(context, uuid,
                                       columns_to_join=columns_to_join)

524 

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
        """Load an Instance by uuid.

        :param expected_attrs: optional attrs to hydrate; defaults to just
            ['info_cache']
        :param use_slave: whether the DB read may go to a slave
        """
        if expected_attrs is None:
            expected_attrs = ['info_cache']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = cls._db_instance_get_by_uuid(context, uuid, columns_to_join,
                                               use_slave=use_slave)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)

534 

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        """Load an Instance by integer DB id.

        :param expected_attrs: optional attrs to hydrate; defaults to just
            ['info_cache']
        """
        if expected_attrs is None:
            expected_attrs = ['info_cache']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get(context, inst_id,
                                  columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)

544 

    @base.remotable
    def create(self):
        """Persist this instance to the database for the first time.

        Serializes the instance_extra fields (numa_topology, pci_requests,
        flavor, etc.) into the ``extra`` column, creates the DB row, and
        refreshes this object from the result.

        :raises: ObjectActionError if the instance was already created or
            already deleted, or (at version >= 2.8) if 'node' is set
            without 'compute_id'
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        if self.obj_attr_is_set('deleted') and self.deleted:
            raise exception.ObjectActionError(action='create',
                                              reason='already deleted')
        updates = self.obj_get_changes()
        version = versionutils.convert_version_to_tuple(self.VERSION)

        if 'node' in updates and 'compute_id' not in updates:
            # NOTE(danms): This is not really the best idea, as we should try
            # not to have different behavior based on the version of the
            # object. However, this exception helps us find cases in testing
            # where these may not be updated together. We can remove this
            # later.
            if version >= (2, 8):
                raise exception.ObjectActionError(
                    ('Instance is being created with node (%r) '
                     'but not compute_id') % updates['node'])
            else:
                LOG.warning('Instance is being created with node %r but '
                            'no compute_id', updates['node'])

        # NOTE(danms): We know because of the check above that deleted
        # is either unset or false. Since we need to avoid passing False
        # down to the DB layer (which uses an integer), we can always
        # default it to zero here.
        updates['deleted'] = 0

        expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS
                          if attr in updates]

        if 'info_cache' in updates:
            # The DB layer stores the network info as serialized JSON.
            updates['info_cache'] = {
                'network_info': updates['info_cache'].network_info.json()
            }
        updates['extra'] = {}
        numa_topology = updates.pop('numa_topology', None)
        expected_attrs.append('numa_topology')
        if numa_topology:
            updates['extra']['numa_topology'] = numa_topology._to_json()
        else:
            updates['extra']['numa_topology'] = None
        pci_requests = updates.pop('pci_requests', None)
        expected_attrs.append('pci_requests')
        if pci_requests:
            updates['extra']['pci_requests'] = (
                pci_requests.to_json())
        else:
            updates['extra']['pci_requests'] = None
        # New instances never have PCI devices assigned yet.
        updates['extra']['pci_devices'] = None
        device_metadata = updates.pop('device_metadata', None)
        expected_attrs.append('device_metadata')
        if device_metadata:
            updates['extra']['device_metadata'] = (
                device_metadata._to_json())
        else:
            updates['extra']['device_metadata'] = None
        flavor = updates.pop('flavor', None)
        if flavor:
            expected_attrs.append('flavor')
            old = ((self.obj_attr_is_set('old_flavor') and
                    self.old_flavor) and
                   self.old_flavor.obj_to_primitive() or None)
            new = ((self.obj_attr_is_set('new_flavor') and
                    self.new_flavor) and
                   self.new_flavor.obj_to_primitive() or None)
            flavor_info = {
                'cur': self.flavor.obj_to_primitive(),
                'old': old,
                'new': new,
            }
            self._nullify_flavor_description(flavor_info)
            updates['extra']['flavor'] = jsonutils.dumps(flavor_info)
        keypairs = updates.pop('keypairs', None)
        if keypairs is not None:
            expected_attrs.append('keypairs')
            updates['extra']['keypairs'] = jsonutils.dumps(
                keypairs.obj_to_primitive())
        vcpu_model = updates.pop('vcpu_model', None)
        expected_attrs.append('vcpu_model')
        if vcpu_model:
            updates['extra']['vcpu_model'] = (
                jsonutils.dumps(vcpu_model.obj_to_primitive()))
        else:
            updates['extra']['vcpu_model'] = None
        trusted_certs = updates.pop('trusted_certs', None)
        expected_attrs.append('trusted_certs')
        if trusted_certs:
            updates['extra']['trusted_certs'] = jsonutils.dumps(
                trusted_certs.obj_to_primitive())
        else:
            updates['extra']['trusted_certs'] = None
        resources = updates.pop('resources', None)
        expected_attrs.append('resources')
        if resources:
            updates['extra']['resources'] = jsonutils.dumps(
                resources.obj_to_primitive())
        else:
            updates['extra']['resources'] = None

        # Initially all instances have no migration context, so avoid us
        # trying to lazy-load it to check.
        updates['extra']['migration_context'] = None

        db_inst = db.instance_create(self._context, updates)
        self._from_db_object(self._context, self, db_inst, expected_attrs)

        if ('pci_devices' in updates['extra'] and
                updates['extra']['pci_devices'] is None):
            self.pci_devices = None
        self.migration_context = None

        # NOTE(danms): The EC2 ids are created on their first load. In order
        # to avoid them being missing and having to be loaded later, we
        # load them once here on create now that the instance record is
        # created.
        self._load_ec2_ids()
        self.obj_reset_changes(['ec2_ids', 'pci_devices', 'migration_context'])

666 

    @base.remotable
    def destroy(self, hard_delete=False):
        """Delete this instance from the database.

        :param hard_delete: when True, remove the row entirely instead of
            soft-deleting it
        :raises: ObjectActionError if the instance was never created, has
            no uuid, or the host changed underneath us (constraint failed)
        """
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        if not self.obj_attr_is_set('uuid'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='no uuid')
        if not self.obj_attr_is_set('host') or not self.host:
            # NOTE(danms): If our host is not set, avoid a race
            constraint = db.constraint(host=db.equal_any(None))
        else:
            constraint = None

        try:
            db_inst = db.instance_destroy(self._context, self.uuid,
                                          constraint=constraint,
                                          hard_delete=hard_delete)
            self._from_db_object(self._context, self, db_inst)
        except exception.ConstraintNotMet:
            raise exception.ObjectActionError(action='destroy',
                                              reason='host changed')
        # Unset 'id' so subsequent destroy() calls fail the checks above.
        delattr(self, base.get_attrname('id'))

690 

    def _save_info_cache(self, context):
        """Persist the info_cache sub-object, if one is attached."""
        if self.info_cache:
            # Save under the caller's context rather than the one the
            # sub-object was created with.
            with self.info_cache.obj_alternate_context(context):
                self.info_cache.save()

695 

    # TODO(stephenfin): Remove this as it's related to nova-network
    def _save_security_groups(self, context):
        # NOTE(stephenfin): We no longer bother saving these since they
        # shouldn't be created in the first place
        pass

701 

    def _save_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

705 

    def _save_pci_requests(self, context):
        # TODO(danms): Unfortunately, extra.pci_requests is not a serialized
        # PciRequests object (!), so we have to handle it specially here.
        # That should definitely be fixed!
        self._extra_values_to_save['pci_requests'] = (
            self.pci_requests.to_json())

712 

    def _save_pci_devices(self, context):
        # NOTE(yjiang5): All devices held by PCI tracker, only PCI tracker
        # permitted to update the DB. all change to devices from here will
        # be dropped.
        pass

718 

    def _save_tags(self, context):
        # NOTE(gibi): tags are not saved through the instance
        pass

722 

    def _save_services(self, context):
        # NOTE(mriedem): services are not saved through the instance
        pass

726 

727 @staticmethod 

728 def _nullify_flavor_description(flavor_info): 

729 """Helper method to nullify descriptions from a set of primitive 

730 flavors. 

731 

732 Note that we don't remove the flavor description since that would 

733 make the versioned notification FlavorPayload have to handle the field 

734 not being set on the embedded instance.flavor. 

735 

736 :param dict: dict of primitive flavor objects where the values are the 

737 flavors which get persisted in the instance_extra.flavor table. 

738 """ 

739 for flavor in flavor_info.values(): 

740 if flavor and 'description' in flavor['nova_object.data']: 

741 flavor['nova_object.data']['description'] = None 

742 

743 def _save_flavor(self, context): 

744 if not any([x in self.obj_what_changed() for x in 

745 ('flavor', 'old_flavor', 'new_flavor')]): 

746 return 

747 flavor_info = { 

748 'cur': self.flavor.obj_to_primitive(), 

749 'old': (self.old_flavor and 

750 self.old_flavor.obj_to_primitive() or None), 

751 'new': (self.new_flavor and 

752 self.new_flavor.obj_to_primitive() or None), 

753 } 

754 self._nullify_flavor_description(flavor_info) 

755 self._extra_values_to_save['flavor'] = jsonutils.dumps(flavor_info) 

756 self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor']) 

757 

    def _save_old_flavor(self, context):
        """Delegate to _save_flavor, which persists all three flavors."""
        if 'old_flavor' in self.obj_what_changed():
            self._save_flavor(context)

761 

    def _save_new_flavor(self, context):
        """Delegate to _save_flavor, which persists all three flavors."""
        if 'new_flavor' in self.obj_what_changed():
            self._save_flavor(context)

765 

    def _save_ec2_ids(self, context):
        """Intentional no-op: ec2_ids is a derived, read-only attribute.

        :param context: security context (unused)
        """
        # NOTE(hanlind): Read-only so no need to save this.
        pass

769 

770 def _save_keypairs(self, context): 

771 if 'keypairs' in self.obj_what_changed(): 

772 self._save_extra_generic('keypairs') 

773 self.obj_reset_changes(['keypairs'], recursive=True) 

774 

775 def _save_extra_generic(self, field): 

776 if field in self.obj_what_changed(): 

777 obj = getattr(self, field) 

778 value = None 

779 if obj is not None: 

780 value = jsonutils.dumps(obj.obj_to_primitive()) 

781 self._extra_values_to_save[field] = value 

782 

    # TODO(stephenfin): Remove the 'admin_state_reset' field in version 3.0 of
    # the object
    @base.remotable
    def save(self, expected_vm_state=None,
             expected_task_state=None, admin_state_reset=False):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.obj_what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.

        :param expected_vm_state: Optional tuple of valid vm states
            for the instance to be in
        :param expected_task_state: Optional tuple of valid task states
            for the instance to be in
        :param admin_state_reset: True if admin API is forcing setting
            of task_state/vm_state
        :raises: exception.ObjectActionError if node is updated without
            compute_id on object version >= 2.8
        :raises: exception.InstanceNotFound if the instance row does not
            exist yet (save-before-create)
        """
        context = self._context

        # Accumulates serialized object fields destined for instance_extra;
        # filled in by the _save_* helpers invoked below.
        self._extra_values_to_save = {}
        updates = {}
        changes = self.obj_what_changed()

        version = versionutils.convert_version_to_tuple(self.VERSION)
        if 'node' in changes and 'compute_id' not in changes:
            # NOTE(danms): This is not really the best idea, as we should try
            # not to have different behavior based on the version of the
            # object. However, this exception helps us find cases in testing
            # where these may not be updated together. We can remove this
            # later.
            if version >= (2, 8):
                raise exception.ObjectActionError(
                    ('Instance.node is being updated (%r) '
                     'but compute_id is not') % self.node)
            else:
                LOG.warning('Instance %s node is being updated to %r but '
                            'compute_id is not', self.uuid, self.node)

        for field in self.fields:
            # NOTE(danms): For object fields, we construct and call a
            # helper method like self._save_$attrname()
            if (self.obj_attr_is_set(field) and
                    isinstance(self.fields[field], fields.ObjectField)):
                try:
                    getattr(self, '_save_%s' % field)(context)
                except AttributeError:
                    # No dedicated handler; extra fields fall back to the
                    # generic JSON serialization path.
                    if field in _INSTANCE_EXTRA_FIELDS:
                        self._save_extra_generic(field)
                        continue
                    LOG.exception('No save handler for %s', field,
                                  instance=self)
                except db_exc.DBReferenceError as exp:
                    if exp.key != 'instance_uuid':
                        raise
                    # NOTE(melwitt): This will happen if we instance.save()
                    # before an instance.create() and FK constraint fails.
                    # In practice, this occurs in cells during a delete of
                    # an unscheduled instance. Otherwise, it could happen
                    # as a result of bug.
                    raise exception.InstanceNotFound(instance_id=self.uuid)
            elif field in changes:
                # Plain (non-object) dirty fields become column updates.
                updates[field] = self[field]

        if self._extra_values_to_save:
            db.instance_extra_update_by_uuid(context, self.uuid,
                                             self._extra_values_to_save)

        if not updates:
            # Nothing column-wise to write; skip the DB round-trip and
            # the update notification entirely.
            return

        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0

        # Expected states are passed through to the DB layer which enforces
        # them atomically with the update.
        if expected_task_state is not None:
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state

        expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
                          if self.obj_attr_is_set(attr)]
        if 'pci_devices' in expected_attrs:
            # NOTE(danms): We don't refresh pci_devices on save right now
            expected_attrs.remove('pci_devices')

        # NOTE(alaski): We need to pull system_metadata for the
        # notification.send_update() below. If we don't there's a KeyError
        # when it tries to extract the flavor.
        if 'system_metadata' not in expected_attrs:
            expected_attrs.append('system_metadata')
        old_ref, inst_ref = db.instance_update_and_get_original(
            context, self.uuid, updates,
            columns_to_join=_expected_cols(expected_attrs))
        self._from_db_object(context, self, inst_ref,
                             expected_attrs=expected_attrs)

        # NOTE(danms): We have to be super careful here not to trigger
        # any lazy-loads that will unmigrate or unbackport something. So,
        # make a copy of the instance for notifications first.
        new_ref = self.obj_clone()
        notifications.send_update(context, old_ref, new_ref)

        self.obj_reset_changes()

891 

    @base.remotable
    def refresh(self, use_slave=False):
        """Re-read this instance from the database, in place.

        Only attributes already set on this object are refreshed; the
        refreshed copy is fetched with the same set of optional attributes
        so no extra joins happen. Change flags are cleared afterwards.

        :param use_slave: if True, read from a database replica
        """
        extra = [field for field in INSTANCE_OPTIONAL_ATTRS
                 if self.obj_attr_is_set(field)]
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid,
                                             expected_attrs=extra,
                                             use_slave=use_slave)
        # NOTE(danms): We orphan the instance copy so we do not unexpectedly
        # trigger a lazy-load (which would mean we failed to calculate the
        # expected_attrs properly)
        current._context = None

        for field in self.fields:
            if field not in self:
                continue
            if field not in current:
                # If the field isn't in current we should not
                # touch it, triggering a likely-recursive lazy load.
                # Log it so we can see it happening though, as it
                # probably isn't expected in most cases.
                LOG.debug('Field %s is set but not in refreshed '
                          'instance, skipping', field)
                continue
            if field == 'info_cache':
                # info_cache knows how to refresh itself in place.
                self.info_cache.refresh()
            elif self[field] != current[field]:
                self[field] = current[field]
        self.obj_reset_changes()

920 

921 def _load_generic(self, attrname): 

922 instance = self.__class__.get_by_uuid(self._context, 

923 uuid=self.uuid, 

924 expected_attrs=[attrname]) 

925 

926 if attrname not in instance: 926 ↛ 928line 926 didn't jump to line 928 because the condition on line 926 was never true

927 # NOTE(danms): Never allow us to recursively-load 

928 raise exception.ObjectActionError( 

929 action='obj_load_attr', 

930 reason=_('loading %s requires recursion') % attrname) 

931 

932 # NOTE(danms): load anything we don't already have from the 

933 # instance we got from the database to make the most of the 

934 # performance hit. 

935 for field in self.fields: 

936 if field in instance and field not in self: 

937 setattr(self, field, getattr(instance, field)) 

938 

    def _load_fault(self):
        """Lazy-load the most recent fault for this instance (may be None)."""
        self.fault = objects.InstanceFault.get_latest_for_instance(
            self._context, self.uuid)

942 

943 def _load_numa_topology(self, db_topology=_NO_DATA_SENTINEL): 

944 if db_topology is None: 

945 self.numa_topology = None 

946 elif db_topology is not _NO_DATA_SENTINEL: 

947 self.numa_topology = objects.InstanceNUMATopology.obj_from_db_obj( 

948 self._context, self.uuid, db_topology) 

949 else: 

950 try: 

951 self.numa_topology = \ 

952 objects.InstanceNUMATopology.get_by_instance_uuid( 

953 self._context, self.uuid) 

954 except exception.NumaTopologyNotFound: 

955 self.numa_topology = None 

956 

957 def _load_pci_requests(self, db_requests=_NO_DATA_SENTINEL): 

958 if db_requests is not _NO_DATA_SENTINEL: 

959 self.pci_requests = objects.InstancePCIRequests.obj_from_db( 

960 self._context, self.uuid, db_requests) 

961 else: 

962 self.pci_requests = \ 

963 objects.InstancePCIRequests.get_by_instance_uuid( 

964 self._context, self.uuid) 

965 

966 def _load_device_metadata(self, db_dev_meta=_NO_DATA_SENTINEL): 

967 if db_dev_meta is None: 

968 self.device_metadata = None 

969 elif db_dev_meta is not _NO_DATA_SENTINEL: 969 ↛ 974line 969 didn't jump to line 974 because the condition on line 969 was always true

970 self.device_metadata = \ 

971 objects.InstanceDeviceMetadata.obj_from_db( 

972 self._context, db_dev_meta) 

973 else: 

974 self.device_metadata = \ 

975 objects.InstanceDeviceMetadata.get_by_instance_uuid( 

976 self._context, self.uuid) 

977 

978 def _load_flavor(self): 

979 instance = self.__class__.get_by_uuid( 

980 self._context, uuid=self.uuid, 

981 expected_attrs=['flavor']) 

982 

983 # NOTE(danms): Orphan the instance to make sure we don't lazy-load 

984 # anything below 

985 instance._context = None 

986 self.flavor = instance.flavor 

987 self.old_flavor = instance.old_flavor 

988 self.new_flavor = instance.new_flavor 

989 

990 def _load_vcpu_model(self, db_vcpu_model=_NO_DATA_SENTINEL): 

991 if db_vcpu_model is None: 

992 self.vcpu_model = None 

993 elif db_vcpu_model is _NO_DATA_SENTINEL: 993 ↛ 994line 993 didn't jump to line 994 because the condition on line 993 was never true

994 self.vcpu_model = objects.VirtCPUModel.get_by_instance_uuid( 

995 self._context, self.uuid) 

996 else: 

997 db_vcpu_model = jsonutils.loads(db_vcpu_model) 

998 self.vcpu_model = objects.VirtCPUModel.obj_from_primitive( 

999 db_vcpu_model) 

1000 

    def _load_ec2_ids(self):
        """Lazy-load the EC2 id mappings derived from this instance."""
        self.ec2_ids = objects.EC2Ids.get_by_instance(self._context, self)

1003 

    def _load_pci_devices(self):
        """Lazy-load the PCI devices currently allocated to this instance."""
        self.pci_devices = objects.PciDeviceList.get_by_instance_uuid(
            self._context, self.uuid)

1007 

1008 def _load_migration_context(self, db_context=_NO_DATA_SENTINEL): 

1009 if db_context is _NO_DATA_SENTINEL: 

1010 try: 

1011 self.migration_context = ( 

1012 objects.MigrationContext.get_by_instance_uuid( 

1013 self._context, self.uuid)) 

1014 except exception.MigrationContextNotFound: 

1015 self.migration_context = None 

1016 elif db_context is None: 

1017 self.migration_context = None 

1018 else: 

1019 self.migration_context = objects.MigrationContext.obj_from_db_obj( 

1020 db_context) 

1021 

    def _load_keypairs(self, db_keypairs=_NO_DATA_SENTINEL):
        """Load keypairs from a DB primitive or by querying the DB.

        :param db_keypairs: JSON string from instance_extra, or the
            sentinel meaning "query the DB". A falsy value (no stored
            keypairs) leaves the attribute untouched.
        """
        if db_keypairs is _NO_DATA_SENTINEL:
            inst = objects.Instance.get_by_uuid(self._context, self.uuid,
                                                expected_attrs=['keypairs'])
            if 'keypairs' in inst:
                self.keypairs = inst.keypairs
                self.keypairs.obj_reset_changes(recursive=True)
                self.obj_reset_changes(['keypairs'])
            else:
                self.keypairs = objects.KeyPairList(objects=[])
                # NOTE(danms): We leave the keypairs attribute dirty in hopes
                # someone else will save it for us
        elif db_keypairs:
            self.keypairs = objects.KeyPairList.obj_from_primitive(
                jsonutils.loads(db_keypairs))
            self.obj_reset_changes(['keypairs'])

1038 

    def _load_tags(self):
        """Lazy-load the tags attached to this instance."""
        self.tags = objects.TagList.get_by_resource_id(
            self._context, self.uuid)

1042 

1043 def _load_trusted_certs(self, db_trusted_certs=_NO_DATA_SENTINEL): 

1044 if db_trusted_certs is None: 

1045 self.trusted_certs = None 

1046 elif db_trusted_certs is _NO_DATA_SENTINEL: 

1047 self.trusted_certs = objects.TrustedCerts.get_by_instance_uuid( 

1048 self._context, self.uuid) 

1049 else: 

1050 self.trusted_certs = objects.TrustedCerts.obj_from_primitive( 

1051 jsonutils.loads(db_trusted_certs)) 

1052 

1053 def _load_resources(self, db_resources=_NO_DATA_SENTINEL): 

1054 if db_resources is None: 

1055 self.resources = None 

1056 elif db_resources is _NO_DATA_SENTINEL: 

1057 self.resources = objects.ResourceList.get_by_instance_uuid( 

1058 self._context, self.uuid) 

1059 else: 

1060 self.resources = objects.ResourceList.obj_from_primitive( 

1061 jsonutils.loads(db_resources)) 

1062 

1063 def apply_migration_context(self): 

1064 if self.migration_context: 

1065 self._set_migration_context_to_instance(prefix='new_') 

1066 else: 

1067 LOG.debug("Trying to apply a migration context that does not " 

1068 "seem to be set for this instance", instance=self) 

1069 

1070 def revert_migration_context(self): 

1071 if self.migration_context: 1071 ↛ 1074line 1071 didn't jump to line 1074 because the condition on line 1071 was always true

1072 self._set_migration_context_to_instance(prefix='old_') 

1073 else: 

1074 LOG.debug("Trying to revert a migration context that does not " 

1075 "seem to be set for this instance", instance=self) 

1076 

1077 def _set_migration_context_to_instance(self, prefix): 

1078 for inst_attr_name in _MIGRATION_CONTEXT_ATTRS: 

1079 setattr(self, inst_attr_name, None) 

1080 attr_name = prefix + inst_attr_name 

1081 if attr_name in self.migration_context: 

1082 attr_value = getattr( 

1083 self.migration_context, attr_name) 

1084 setattr(self, inst_attr_name, attr_value) 

1085 

    @contextlib.contextmanager
    def mutated_migration_context(self, revert=False):
        """Context manager to temporarily apply/revert the migration context.

        Calling .save() from within the context manager means that the mutated
        context will be saved which can cause incorrect resource tracking, and
        should be avoided.

        :param revert: if True, overlay the 'old_*' values instead of the
            'new_*' values while inside the context.
        """
        # First check to see if we even have a migration context set and if not
        # we can exit early without lazy-loading other attributes.
        if 'migration_context' in self and self.migration_context is None:
            yield
            return

        # Snapshot the current values so they can be restored on exit,
        # even if the body raises.
        current_values = {}
        for attr_name in _MIGRATION_CONTEXT_ATTRS:
            current_values[attr_name] = getattr(self, attr_name)
        if revert:
            self.revert_migration_context()
        else:
            self.apply_migration_context()
        try:
            yield
        finally:
            for attr_name in _MIGRATION_CONTEXT_ATTRS:
                setattr(self, attr_name, current_values[attr_name])

1112 

    @base.remotable
    def drop_migration_context(self):
        """Delete the stored migration context for this instance, if any.

        Clears both the instance_extra row value and the in-memory
        attribute.
        """
        if self.migration_context:
            db.instance_extra_update_by_uuid(self._context, self.uuid,
                                             {'migration_context': None})
            self.migration_context = None

1119 

1120 def clear_numa_topology(self): 

1121 numa_topology = self.numa_topology 

1122 if numa_topology is not None: 

1123 self.numa_topology = numa_topology.clear_host_pinning() 

1124 

    @base.lazy_load_counter
    def obj_load_attr(self, attrname):
        """Lazy-load an attribute on demand.

        Validates that this object has both a context and a uuid, then
        delegates to _obj_load_attr() with read_deleted temporarily set
        so deleted instances can still be loaded.

        :param attrname: name of the attribute to load
        :raises: exception.OrphanedObjectError if there is no context
        :raises: exception.ObjectActionError if uuid is not set
        """
        # NOTE(danms): We can't lazy-load anything without a context and a uuid
        if not self._context:
            if 'uuid' in self:
                LOG.debug(
                    "Lazy-load of '%s' attempted by orphaned instance",
                    attrname, instance=self
                )
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        if 'uuid' not in self:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)

        LOG.debug("Lazy-loading '%(attr)s' on %(name)s uuid %(uuid)s",
                  {'attr': attrname,
                   'name': self.obj_name(),
                   'uuid': self.uuid,
                   })

        # Allow loading attributes of soft-deleted instances.
        with utils.temporary_mutation(self._context, read_deleted='yes'):
            self._obj_load_attr(attrname)

1149 

    def _obj_load_attr(self, attrname):
        """Internal method for loading attributes from instances.

        NOTE: Do not use this directly.

        This method contains the implementation of lazy-loading attributes
        from Instance object, minus some massaging of the context and
        error-checking. This should always be called with the object-local
        context set for reading deleted instances and with uuid set. All
        of the code below depends on those two things. Thus, this should
        only be called from obj_load_attr() itself.

        :param attrname: The name of the attribute to be loaded
        :raises: exception.ObjectActionError for non-lazy-loadable names
        """

        # NOTE(danms): We handle some fields differently here so that we
        # can be more efficient
        if attrname == 'fault':
            self._load_fault()
        elif attrname == 'numa_topology':
            self._load_numa_topology()
        elif attrname == 'device_metadata':
            self._load_device_metadata()
        elif attrname == 'pci_requests':
            self._load_pci_requests()
        elif attrname == 'vcpu_model':
            self._load_vcpu_model()
        elif attrname == 'ec2_ids':
            self._load_ec2_ids()
        elif attrname == 'migration_context':
            self._load_migration_context()
        elif attrname == 'keypairs':
            # NOTE(danms): Let keypairs control its own destiny for
            # resetting changes.
            return self._load_keypairs()
        elif attrname == 'trusted_certs':
            # Early return: helper manages its own change flags.
            return self._load_trusted_certs()
        elif attrname == 'resources':
            # Early return: helper manages its own change flags.
            return self._load_resources()
        elif attrname == 'security_groups':
            self.security_groups = objects.SecurityGroupList()
        elif attrname == 'pci_devices':
            self._load_pci_devices()
        elif 'flavor' in attrname:
            # Substring match intentionally covers 'flavor', 'old_flavor'
            # and 'new_flavor'; all three load together.
            self._load_flavor()
        elif attrname == 'services' and self.deleted:
            # NOTE(mriedem): The join in the data model for instances.services
            # filters on instances.deleted == 0, so if the instance is deleted
            # don't attempt to even load services since we'll fail.
            self.services = objects.ServiceList(self._context)
        elif attrname == 'tags':
            if self.deleted:
                # NOTE(mriedem): Same story as services, the DB API query
                # in instance_tag_get_by_instance_uuid will fail if the
                # instance has been deleted so just return an empty tag list.
                self.tags = objects.TagList(self._context)
            else:
                self._load_tags()
        elif attrname in self.fields and attrname != 'id':
            # NOTE(danms): We've never let 'id' be lazy-loaded, and use its
            # absence as a sentinel that it hasn't been created in the database
            # yet, so refuse to do so here.
            self._load_generic(attrname)
        else:
            # NOTE(danms): This is historically what we did for
            # something not in a field that was force-loaded. So, just
            # do this for consistency.
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)

        self.obj_reset_changes([attrname])

1222 

1223 def get_flavor(self, namespace=None): 

1224 prefix = ('%s_' % namespace) if namespace is not None else '' 

1225 attr = '%sflavor' % prefix 

1226 try: 

1227 return getattr(self, attr) 

1228 except exception.FlavorNotFound: 

1229 # NOTE(danms): This only happens in the case where we don't 

1230 # have flavor information in instance_extra, and doing 

1231 # this triggers a lookup based on our instance_type_id for 

1232 # (very) legacy instances. That legacy code expects a None here, 

1233 # so emulate it for this helper, even though the actual attribute 

1234 # is not nullable. 

1235 return None 

1236 

    @base.remotable
    def delete_metadata_key(self, key):
        """Optimized metadata delete method.

        This provides a more efficient way to delete a single metadata
        key, instead of just calling instance.save(). This should be called
        with the key still present in self.metadata, which it will update
        after completion.

        :param key: the metadata key to remove
        """
        db.instance_metadata_delete(self._context, self.uuid, key)
        # Remember whether metadata was already dirty so we only clear the
        # change flag when our own edit caused it.
        md_was_changed = 'metadata' in self.obj_what_changed()
        del self.metadata[key]
        # Keep the baseline copy in sync so the deletion itself doesn't
        # mark the field dirty.
        self._orig_metadata.pop(key, None)
        notifications.send_update(self._context, self, self)
        if not md_was_changed:
            self.obj_reset_changes(['metadata'])

1253 

1254 def get_network_info(self): 

1255 if self.info_cache is None: 

1256 return network_model.NetworkInfo.hydrate([]) 

1257 return self.info_cache.network_info 

1258 

    def get_bdms(self):
        """Return the block device mappings attached to this instance."""
        return objects.BlockDeviceMappingList.get_by_instance_uuid(
            self._context, self.uuid)

1262 

    def get_shares(self):
        """Return the share mappings attached to this instance."""
        return objects.ShareMappingList.get_by_instance_uuid(
            self._context, self.uuid)

1266 

1267 def remove_pci_device_and_request(self, pci_device): 

1268 """Remove the PciDevice and the related InstancePciRequest""" 

1269 if pci_device in self.pci_devices.objects: 1269 ↛ 1271line 1269 didn't jump to line 1271 because the condition on line 1269 was always true

1270 self.pci_devices.objects.remove(pci_device) 

1271 self.pci_requests.requests = [ 

1272 pci_req for pci_req in self.pci_requests.requests 

1273 if pci_req.request_id != pci_device.request_id] 

1274 

1275 def get_pci_devices( 

1276 self, 

1277 source: ty.Optional[int] = None, 

1278 request_id: ty.Optional[str] = None, 

1279 ) -> ty.List["objects.PciDevice"]: 

1280 """Return the PCI devices allocated to the instance 

1281 

1282 :param source: Filter by source. It can be 

1283 InstancePCIRequest.FLAVOR_ALIAS or InstancePCIRequest.NEUTRON_PORT 

1284 or None. None means returns devices from both type of requests. 

1285 :param request_id: Filter by PciDevice.request_id. None means do not 

1286 filter by request_id. 

1287 :return: a list of matching PciDevice objects 

1288 """ 

1289 if not self.pci_devices: 

1290 # return early to avoid an extra lazy load on self.pci_requests 

1291 # if there are no devices allocated to be filtered 

1292 return [] 

1293 

1294 devs = self.pci_devices.objects 

1295 

1296 if request_id is not None: 

1297 devs = [dev for dev in devs if dev.request_id == request_id] 

1298 

1299 if source is not None: 

1300 # NOTE(gibi): this happens to work for the old requests when the 

1301 # request has request_id None and therefore the device allocated 

1302 # due to that request has request_id None too, so they will be 

1303 # mapped via the None key. 

1304 req_id_to_req = { 

1305 req.request_id: req for req in self.pci_requests.requests 

1306 } 

1307 devs = [ 

1308 dev 

1309 for dev in devs 

1310 if (req_id_to_req[dev.request_id].source == source) 

1311 ] 

1312 

1313 return devs 

1314 

1315 

def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
    """Populate an InstanceList from DB rows, optionally attaching faults.

    :param context: security context
    :param inst_list: the (empty) list object to fill in
    :param db_inst_list: iterable of DB instance rows
    :param expected_attrs: optional attributes to hydrate; 'fault' is
        handled here via a batched query and removed from the list.
    :returns: inst_list, populated and with change flags cleared
    """
    want_faults = bool(expected_attrs) and 'fault' in expected_attrs
    latest_fault_by_uuid = {}
    if want_faults:
        # Build an instance_uuid:latest-fault mapping
        expected_attrs.remove('fault')
        uuids = [row['uuid'] for row in db_inst_list]
        all_faults = objects.InstanceFaultList.get_by_instance_uuids(
            context, uuids)
        for fault in all_faults:
            # Only the first (latest) fault per instance is kept.
            latest_fault_by_uuid.setdefault(fault.instance_uuid, fault)

    inst_cls = objects.Instance

    inst_list.objects = []
    for row in db_inst_list:
        instance = inst_cls._from_db_object(
            context, inst_cls(context), row,
            expected_attrs=expected_attrs)
        if want_faults:
            instance.fault = latest_fault_by_uuid.get(instance.uuid)
        inst_list.objects.append(instance)
    inst_list.obj_reset_changes()
    return inst_list

1341 

1342 

@db.pick_context_manager_writer
def populate_missing_availability_zones(context, max_count):
    """Online data migration: backfill availability_zone on instances.

    :param context: database context (session provided by the decorator)
    :param max_count: maximum number of instances to process in this batch
    :returns: tuple of (rows found, rows updated)
    """
    # instances without host have no reasonable AZ to set
    not_empty_host = models.Instance.host != None  # noqa E711
    instances = (context.session.query(models.Instance).
                 filter(not_empty_host).
                 filter_by(availability_zone=None).limit(max_count).all())
    count_all = len(instances)
    count_hit = 0
    for instance in instances:
        az = avail_zone.get_instance_availability_zone(context, instance)
        instance.availability_zone = az
        instance.save(context.session)
        count_hit += 1
    return count_all, count_hit

1358 

1359 

@db.pick_context_manager_writer
def populate_instance_compute_id(context, max_count):
    """Online data migration: backfill compute_id from host/node lookups.

    :param context: database context (session provided by the decorator)
    :param max_count: maximum number of instances to process in this batch
    :returns: tuple of (rows found, rows updated)
    """
    instances = (context.session.query(models.Instance).
                 filter(models.Instance.compute_id == None).  # noqa E711
                 limit(max_count).all())
    count_all = count_hit = 0
    # Read deleted compute nodes too, in case the instance's node has
    # since been removed.
    rd_context = nova_context.get_admin_context(read_deleted='yes')
    for instance in instances:
        count_all += 1
        try:
            node = objects.ComputeNode.get_by_host_and_nodename(rd_context,
                                                                instance.host,
                                                                instance.node)
        except exception.ComputeHostNotFound:
            # Leave this instance unmigrated; operator intervention needed.
            LOG.error('Unable to migrate instance because host %s with '
                      'node %s not found', instance.host, instance.node,
                      instance=instance)
            continue
        instance.compute_id = node.id
        instance.save(context.session)
        count_hit += 1
    return count_all, count_hit

1382 

1383 

1384@base.NovaObjectRegistry.register 

1385class InstanceList(base.ObjectListBase, base.NovaObject): 

1386 # Version 2.0: Initial Version 

1387 # Version 2.1: Add get_uuids_by_host() 

1388 # Version 2.2: Pagination for get_active_by_window_joined() 

1389 # Version 2.3: Add get_count_by_vm_state() 

1390 # Version 2.4: Add get_counts() 

1391 # Version 2.5: Add get_uuids_by_host_and_node() 

1392 # Version 2.6: Add get_uuids_by_hosts() 

1393 VERSION = '2.6' 

1394 

1395 fields = { 

1396 'objects': fields.ListOfObjectsField('Instance'), 

1397 } 

1398 

1399 @classmethod 

1400 @db.select_db_reader_mode 

1401 def _get_by_filters_impl(cls, context, filters, 

1402 sort_key='created_at', sort_dir='desc', limit=None, 

1403 marker=None, expected_attrs=None, use_slave=False, 

1404 sort_keys=None, sort_dirs=None): 

1405 if sort_keys or sort_dirs: 

1406 db_inst_list = db.instance_get_all_by_filters_sort( 

1407 context, filters, limit=limit, marker=marker, 

1408 columns_to_join=_expected_cols(expected_attrs), 

1409 sort_keys=sort_keys, sort_dirs=sort_dirs) 

1410 else: 

1411 db_inst_list = db.instance_get_all_by_filters( 

1412 context, filters, sort_key, sort_dir, limit=limit, 

1413 marker=marker, columns_to_join=_expected_cols(expected_attrs)) 

1414 return db_inst_list 

1415 

    @base.remotable_classmethod
    def get_by_filters(cls, context, filters,
                       sort_key='created_at', sort_dir='desc', limit=None,
                       marker=None, expected_attrs=None, use_slave=False,
                       sort_keys=None, sort_dirs=None):
        """Return an InstanceList matching the given filters.

        :param filters: dict of DB-level filters
        :param sort_key/sort_dir: single-key ordering (default newest first)
        :param sort_keys/sort_dirs: multi-key ordering; overrides the pair
        :param limit/marker: pagination controls
        :param expected_attrs: optional attributes to join/hydrate
        :param use_slave: if True, read from a database replica
        """
        db_inst_list = cls._get_by_filters_impl(
            context, filters, sort_key=sort_key, sort_dir=sort_dir,
            limit=limit, marker=marker, expected_attrs=expected_attrs,
            use_slave=use_slave, sort_keys=sort_keys, sort_dirs=sort_dirs)
        # NOTE(melwitt): _make_instance_list could result in joined objects'
        # (from expected_attrs) _from_db_object methods being called during
        # Instance._from_db_object, each of which might choose to perform
        # database writes. So, we call this outside of _get_by_filters_impl to
        # avoid being nested inside a 'reader' database transaction context.
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)

1432 

    @staticmethod
    @db.select_db_reader_mode
    def _db_instance_get_all_by_host(context, host, columns_to_join,
                                     use_slave=False):
        """DB shim so @db.select_db_reader_mode can honor use_slave."""
        return db.instance_get_all_by_host(context, host,
                                           columns_to_join=columns_to_join)

1439 

    @base.remotable_classmethod
    def get_by_host(cls, context, host, expected_attrs=None, use_slave=False):
        """Return an InstanceList of all instances on the given host.

        :param use_slave: if True, read from a database replica
        """
        db_inst_list = cls._db_instance_get_all_by_host(
            context, host, columns_to_join=_expected_cols(expected_attrs),
            use_slave=use_slave)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)

1447 

    @base.remotable_classmethod
    def get_by_host_and_node(cls, context, host, node, expected_attrs=None):
        """Return an InstanceList for the given host and compute node."""
        db_inst_list = db.instance_get_all_by_host_and_node(
            context, host, node,
            columns_to_join=_expected_cols(expected_attrs))
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)

1455 

    @staticmethod
    @db.pick_context_manager_reader
    def _get_uuids_by_host_and_node(context, host, node):
        """DB query: non-deleted instance uuid rows for a host/node pair."""
        return context.session.query(
            models.Instance.uuid).filter_by(
                host=host).filter_by(node=node).filter_by(deleted=0).all()

1462 

    @base.remotable_classmethod
    def get_uuids_by_host_and_node(cls, context, host, node):
        """Return non-deleted instance UUIDs for the given host and node.

        :param context: nova auth request context
        :param host: Filter instances on this host.
        :param node: Filter instances on this node.
        :returns: list of non-deleted instance UUIDs on the given host and node
        """
        return cls._get_uuids_by_host_and_node(context, host, node)

1473 

    @base.remotable_classmethod
    def get_by_host_and_not_type(cls, context, host, type_id=None,
                                 expected_attrs=None):
        """Return instances on a host that are NOT of the given flavor id.

        NOTE(review): expected_attrs is not passed to the DB query here,
        unlike sibling methods — presumably the DB API does not support
        columns_to_join for this call; confirm before relying on joins.
        """
        db_inst_list = db.instance_get_all_by_host_and_not_type(
            context, host, type_id=type_id)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)

1481 

    @base.remotable_classmethod
    def get_all(cls, context, expected_attrs=None):
        """Returns all instances on all nodes."""
        db_instances = db.instance_get_all(
            context, columns_to_join=_expected_cols(expected_attrs))
        return _make_instance_list(context, cls(), db_instances,
                                   expected_attrs)

1489 

    @base.remotable_classmethod
    def get_hung_in_rebooting(cls, context, reboot_window,
                              expected_attrs=None):
        """Return instances stuck rebooting longer than reboot_window."""
        db_inst_list = db.instance_get_all_hung_in_rebooting(context,
                                                             reboot_window)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)

1497 

    @staticmethod
    @db.select_db_reader_mode
    def _db_instance_get_active_by_window_joined(
            context, begin, end, project_id, host, columns_to_join,
            use_slave=False, limit=None, marker=None):
        """DB shim so @db.select_db_reader_mode can honor use_slave."""
        return db.instance_get_active_by_window_joined(
            context, begin, end, project_id, host,
            columns_to_join=columns_to_join, limit=limit, marker=marker)

1506 

    @base.remotable_classmethod
    def _get_active_by_window_joined(cls, context, begin, end=None,
                                     project_id=None, host=None,
                                     expected_attrs=None, use_slave=False,
                                     limit=None, marker=None):
        """Remotable half of get_active_by_window_joined.

        Receives ISO time strings (for RPC transport) and converts them
        back to datetimes before hitting the DB.
        """
        # NOTE(mriedem): We need to convert the begin/end timestamp strings
        # to timezone-aware datetime objects for the DB API call.
        begin = timeutils.parse_isotime(begin)
        end = timeutils.parse_isotime(end) if end else None
        db_inst_list = cls._db_instance_get_active_by_window_joined(
            context, begin, end, project_id, host,
            columns_to_join=_expected_cols(expected_attrs),
            use_slave=use_slave, limit=limit, marker=marker)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)

1522 

    @classmethod
    def get_active_by_window_joined(cls, context, begin, end=None,
                                    project_id=None, host=None,
                                    expected_attrs=None, use_slave=False,
                                    limit=None, marker=None):
        """Get instances and joins active during a certain time window.

        :param context: nova request context
        :param begin: datetime for the start of the time window
        :param end: datetime for the end of the time window
        :param project_id: used to filter instances by project
        :param host: used to filter instances on a given compute host
        :param expected_attrs: list of related fields that can be joined
            in the database layer when querying for instances
        :param use_slave: if True, ship this query off to a DB slave
        :param limit: maximum number of instances to return per page
        :param marker: last instance uuid from the previous page
        :returns: InstanceList
        """
        # NOTE(mriedem): We have to convert the datetime objects to string
        # primitives for the remote call.
        begin = utils.isotime(begin)
        end = utils.isotime(end) if end else None
        return cls._get_active_by_window_joined(context, begin, end,
                                                project_id, host,
                                                expected_attrs,
                                                use_slave=use_slave,
                                                limit=limit, marker=marker)

1552 

1553 # TODO(stephenfin): Remove this as it's related to nova-network 

1554 @base.remotable_classmethod 

1555 def get_by_security_group_id(cls, context, security_group_id): 

1556 raise NotImplementedError() 

1557 

1558 # TODO(stephenfin): Remove this as it's related to nova-network 

1559 @classmethod 

1560 def get_by_security_group(cls, context, security_group): 

1561 raise NotImplementedError() 

1562 

1563 # TODO(stephenfin): Remove this as it's related to nova-network 

1564 @base.remotable_classmethod 

1565 def get_by_grantee_security_group_ids(cls, context, security_group_ids): 

1566 raise NotImplementedError() 

1567 

1568 def fill_faults(self): 

1569 """Batch query the database for our instances' faults. 

1570 

1571 :returns: A list of instance uuids for which faults were found. 

1572 """ 

1573 uuids = [inst.uuid for inst in self] 

1574 faults = objects.InstanceFaultList.get_latest_by_instance_uuids( 

1575 self._context, uuids) 

1576 faults_by_uuid = {} 

1577 for fault in faults: 

1578 faults_by_uuid[fault.instance_uuid] = fault 

1579 

1580 for instance in self: 

1581 if instance.uuid in faults_by_uuid: 

1582 instance.fault = faults_by_uuid[instance.uuid] 

1583 else: 

1584 # NOTE(danms): Otherwise the caller will cause a lazy-load 

1585 # when checking it, and we know there are none 

1586 instance.fault = None 

1587 instance.obj_reset_changes(['fault']) 

1588 

1589 return faults_by_uuid.keys() 

1590 

1591 def fill_metadata(self): 

1592 # NOTE(danms): This only fills system_metadata currently, but could 

1593 # be extended to support user metadata if needed in the future. 

1594 # Make a uuid-indexed dict of non-object instance dicts that the DB 

1595 # layer can use. They need only contain the uuid of the instances 

1596 # we are looking up. Any of them that already have system_metadata 

1597 # need not be included. 

1598 db_inst_shells = {inst.uuid: {'uuid': inst.uuid} for inst in self 

1599 if 'system_metadata' not in inst} 

1600 if db_inst_shells: 

1601 updates = db.instances_fill_metadata( 

1602 self._context, 

1603 db_inst_shells.values(), 

1604 manual_joins=['system_metadata']) 

1605 updated = {i['uuid']: i for i in updates} 

1606 for inst in [i for i in self if i.uuid in updated]: 

1607 # Patch up our instances with system_metadata from the fill 

1608 # operation 

1609 inst.system_metadata = utils.instance_sys_meta(updated) 

1610 

1611 @base.remotable_classmethod 

1612 def get_uuids_by_host(cls, context, host): 

1613 return db.instance_get_all_uuids_by_hosts(context, [host])[host] 

1614 

1615 @base.remotable_classmethod 

1616 def get_uuids_by_hosts(cls, context, hosts): 

1617 """Returns a dict, keyed by hypervisor hostname, of a list of instance 

1618 UUIDs associated with that compute node. 

1619 """ 

1620 return db.instance_get_all_uuids_by_hosts(context, hosts) 

1621 

1622 @staticmethod 

1623 @db.pick_context_manager_reader 

1624 def _get_count_by_vm_state_in_db(context, project_id, user_id, vm_state): 

1625 return context.session.query(models.Instance.id).\ 

1626 filter_by(deleted=0).\ 

1627 filter_by(project_id=project_id).\ 

1628 filter_by(user_id=user_id).\ 

1629 filter_by(vm_state=vm_state).\ 

1630 count() 

1631 

1632 @base.remotable_classmethod 

1633 def get_count_by_vm_state(cls, context, project_id, user_id, vm_state): 

1634 return cls._get_count_by_vm_state_in_db(context, project_id, user_id, 

1635 vm_state) 

1636 

1637 @staticmethod 

1638 @db.pick_context_manager_reader 

1639 def _get_counts_in_db(context, project_id, user_id=None): 

1640 # NOTE(melwitt): Copied from nova/db/main/api.py: 

1641 # It would be better to have vm_state not be nullable 

1642 # but until then we test it explicitly as a workaround. 

1643 not_soft_deleted = sa.or_( 

1644 models.Instance.vm_state != vm_states.SOFT_DELETED, 

1645 models.Instance.vm_state == sql.null() 

1646 ) 

1647 project_query = context.session.query( 

1648 func.count(models.Instance.id), 

1649 func.sum(models.Instance.vcpus), 

1650 func.sum(models.Instance.memory_mb)).\ 

1651 filter_by(deleted=0).\ 

1652 filter(not_soft_deleted).\ 

1653 filter_by(project_id=project_id) 

1654 # NOTE(mriedem): Filter out hidden instances since there should be a 

1655 # non-hidden version of the instance in another cell database and the 

1656 # API will only show one of them, so we don't count the hidden copy. 

1657 project_query = project_query.filter( 

1658 sa.or_( 

1659 models.Instance.hidden == sql.false(), 

1660 models.Instance.hidden == sql.null(), 

1661 )) 

1662 

1663 project_result = project_query.first() 

1664 fields = ('instances', 'cores', 'ram') 

1665 project_counts = {field: int(project_result[idx] or 0) 

1666 for idx, field in enumerate(fields)} 

1667 counts = {'project': project_counts} 

1668 if user_id: 

1669 user_result = project_query.filter_by(user_id=user_id).first() 

1670 user_counts = {field: int(user_result[idx] or 0) 

1671 for idx, field in enumerate(fields)} 

1672 counts['user'] = user_counts 

1673 return counts 

1674 

1675 @base.remotable_classmethod 

1676 def get_counts(cls, context, project_id, user_id=None): 

1677 """Get the counts of Instance objects in the database. 

1678 

1679 :param context: The request context for database access 

1680 :param project_id: The project_id to count across 

1681 :param user_id: The user_id to count across 

1682 :returns: A dict containing the project-scoped counts and user-scoped 

1683 counts if user_id is specified. For example: 

1684 

1685 {'project': {'instances': <count across project>, 

1686 'cores': <count across project>, 

1687 'ram': <count across project}, 

1688 'user': {'instances': <count across user>, 

1689 'cores': <count across user>, 

1690 'ram': <count across user>}} 

1691 """ 

1692 return cls._get_counts_in_db(context, project_id, user_id=user_id) 

1693 

1694 @staticmethod 

1695 @db.pick_context_manager_reader 

1696 def _get_count_by_hosts(context, hosts): 

1697 return context.session.query(models.Instance).\ 

1698 filter_by(deleted=0).\ 

1699 filter(models.Instance.host.in_(hosts)).count() 

1700 

1701 @classmethod 

1702 def get_count_by_hosts(cls, context, hosts): 

1703 return cls._get_count_by_hosts(context, hosts)