Coverage for nova/network/neutron.py: 94%

1566 statements  

« prev     ^ index     » next       coverage.py v7.6.12, created at 2025-04-17 15:08 +0000

1# Copyright 2012 OpenStack Foundation 

2# All Rights Reserved 

3# Copyright (c) 2012 NEC Corporation 

4# 

5# Licensed under the Apache License, Version 2.0 (the "License"); you may 

6# not use this file except in compliance with the License. You may obtain 

7# a copy of the License at 

8# 

9# http://www.apache.org/licenses/LICENSE-2.0 

10# 

11# Unless required by applicable law or agreed to in writing, software 

12# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 

13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 

14# License for the specific language governing permissions and limitations 

15# under the License. 

16 

17""" 

18API and utilities for nova-network interactions. 

19""" 

20 

21import copy 

22import functools 

23import inspect 

24import time 

25import typing as ty 

26 

27from keystoneauth1 import loading as ks_loading 

28from neutronclient.common import exceptions as neutron_client_exc 

29from neutronclient.v2_0 import client as clientv20 

30from oslo_concurrency import lockutils 

31from oslo_log import log as logging 

32from oslo_utils import excutils 

33from oslo_utils import strutils 

34from oslo_utils import uuidutils 

35 

36from nova.accelerator import cyborg 

37from nova.compute import utils as compute_utils 

38import nova.conf 

39from nova import context as nova_context 

40from nova import exception 

41from nova.i18n import _ 

42from nova.network import constants 

43from nova.network import model as network_model 

44from nova import objects 

45from nova.objects import fields as obj_fields 

46from nova.pci import request as pci_request 

47from nova.pci import utils as pci_utils 

48from nova.pci import whitelist as pci_whitelist 

49from nova.policies import servers as servers_policies 

50from nova import profiler 

51from nova import service_auth 

52from nova import utils 

53 

CONF = nova.conf.CONF

LOG = logging.getLogger(__name__)

# Lazily-created module singletons: the shared Keystone session and the
# admin auth plugin loaded from the [neutron] conf section. Both are
# (re)initialized on first use and cleared by reset_state().
_SESSION = None
_ADMIN_AUTH = None

60 

61 

def reset_state():
    """Discard the cached Keystone session and admin auth plugin.

    The next caller of _get_session()/_get_auth_plugin() will rebuild them
    from current configuration.
    """
    global _SESSION, _ADMIN_AUTH

    _SESSION = None
    _ADMIN_AUTH = None

68 

69 

def _load_auth_plugin(conf):
    """Load the auth plugin from the [neutron] section of the config.

    :param conf: the nova configuration object
    :returns: a keystoneauth1 auth plugin
    :raises neutron_client_exc.Unauthorized: if no auth plugin could be
        loaded from the configuration
    """
    plugin = ks_loading.load_auth_from_conf_options(
        conf, nova.conf.neutron.NEUTRON_GROUP)
    if plugin:
        return plugin

    if conf.neutron.auth_type is None:
        # If we're coming in through a REST API call for something like
        # creating a server, the end user is going to get a 500 response
        # which is accurate since the system is mis-configured, but we should
        # leave a breadcrumb for the operator that is checking the logs.
        LOG.error('The [neutron] section of your nova configuration file '
                  'must be configured for authentication with the networking '
                  'service endpoint. See the networking service install guide '
                  'for details: '
                  'https://docs.openstack.org/neutron/latest/install/')
    err_msg = _('Unknown auth type: %s') % conf.neutron.auth_type
    raise neutron_client_exc.Unauthorized(message=err_msg)

89 

90 

def get_binding_profile(port):
    """Convenience method to get the binding:profile from the port

    The binding:profile in the port is undefined in the networking service
    API and is dependent on backend configuration. This means it could be
    an empty dict, None, or have some values.

    :param port: dict port response body from the networking service API
    :returns: The port binding:profile dict; empty if not set on the port
    """
    profile = port.get(constants.BINDING_PROFILE, {})
    # Normalize None (or any other falsy value) to an empty dict.
    return profile if profile else {}

102 

103 

def update_instance_cache_with_nw_info(impl, context, instance, nw_info=None):
    """Persist the given (or freshly fetched) network info on the instance.

    :param impl: network API implementation used to fetch fresh network
        info via _get_instance_nw_info when nw_info is not supplied
    :param context: the request context
    :param instance: the instance whose info cache is being updated
    :param nw_info: optional NetworkInfo; anything that is not a
        NetworkInfo instance is ignored and re-fetched instead
    :raises: nova.exception.InstanceNotFound if the instance disappeared
        (e.g. moved to another cell) while saving the cache
    """
    if instance.deleted:
        LOG.debug('Instance is deleted, no further info cache update',
                  instance=instance)
        return

    try:
        if not isinstance(nw_info, network_model.NetworkInfo):
            nw_info = None
        if nw_info is None:
            nw_info = impl._get_instance_nw_info(context, instance)

        LOG.debug('Updating instance_info_cache with network_info: %s',
                  nw_info, instance=instance)

        # NOTE(comstud): The save() method actually handles updating or
        # creating the instance. We don't need to retrieve the object
        # from the DB first.
        ic = objects.InstanceInfoCache.new(context, instance.uuid)
        ic.network_info = nw_info
        ic.save()
        instance.info_cache = ic
    except exception.InstanceNotFound as e:
        # The instance could have moved during a cross-cell migration when we
        # receive an external event from neutron. Avoid logging a traceback
        # when it happens.
        msg = str(e)
        if e.__class__.__name__.endswith('_Remote'):
            # If this exception was raised remotely over RPC, the traceback(s)
            # will be appended to the message. Truncate it in that case.
            msg = utils.safe_truncate(msg.split('\n', 1)[0], 255)
        LOG.info('Failed storing info cache due to: %s. '
                 'The instance may have moved to another cell during a '
                 'cross-cell migration', msg, instance=instance)
        raise exception.InstanceNotFound(message=msg)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception('Failed storing info cache', instance=instance)

142 

143 

def refresh_cache(f):
    """Decorator to update the instance_info_cache

    Requires context and instance as function args
    """
    argspec = inspect.getfullargspec(f)

    @functools.wraps(f)
    def wrapper(self, context, *args, **kwargs):
        try:
            # get the instance from arguments (or raise ValueError)
            instance = kwargs.get('instance')
            if not instance:
                # 'instance' is positional here: subtract 2 to skip the
                # bound 'self' and 'context' parameters when indexing args.
                instance = args[argspec.args.index('instance') - 2]
        except ValueError:
            msg = _('instance is a required argument to use @refresh_cache')
            raise Exception(msg)

        with lockutils.lock('refresh_cache-%s' % instance.uuid):
            # We need to call the wrapped function with the lock held to ensure
            # that it can call _get_instance_nw_info safely.
            res = f(self, context, *args, **kwargs)
            update_instance_cache_with_nw_info(self, context, instance,
                                               nw_info=res)
        # return the original function's return value
        return res
    return wrapper

171 

172 

@profiler.trace_cls("neutron_api")
class ClientWrapper(clientv20.Client):
    """A Neutron client wrapper class.

    Wraps the callable methods, catches Unauthorized,Forbidden from Neutron and
    convert it to a 401,403 for Nova clients.
    """

    def __init__(self, base_client, admin):
        # Expose all attributes from the base_client instance
        self.__dict__ = base_client.__dict__
        self.base_client = base_client
        self.admin = admin

    def __getattribute__(self, name):
        # Intercept every attribute access and wrap any callable in the
        # exception-translating proxy below. object.__getattribute__ is
        # used to avoid infinite recursion through this method.
        obj = object.__getattribute__(self, name)
        if callable(obj):
            obj = object.__getattribute__(self, 'proxy')(obj)
        return obj

    def proxy(self, obj):
        # Return a wrapper around the callable that translates neutron
        # auth errors into the corresponding nova exceptions.
        def wrapper(*args, **kwargs):
            try:
                ret = obj(*args, **kwargs)
            except neutron_client_exc.Unauthorized:
                if not self.admin:
                    # Token is expired so Neutron is raising a
                    # unauthorized exception, we should convert it to
                    # raise a 401 to make client to handle a retry by
                    # regenerating a valid token and trying a new
                    # attempt.
                    raise exception.Unauthorized()
                # In admin context if token is invalid Neutron client
                # should be able to regenerate a valid by using the
                # Neutron admin credential configuration located in
                # nova.conf.
                LOG.error("Neutron client was not able to generate a "
                          "valid admin token, please verify Neutron "
                          "admin credential located in nova.conf")
                raise exception.NeutronAdminCredentialConfigurationInvalid()
            except neutron_client_exc.Forbidden as e:
                raise exception.Forbidden(str(e))
            return ret
        return wrapper

217 

218 

def _get_auth_plugin(context, admin=False):
    """Return the auth plugin to use for a neutron call.

    NOTE(dprince): In the case where no auth_token is present we allow use of
    neutron admin tenant credentials if it is an admin context. This is to
    support some services (metadata API) where an admin context is used
    without an auth token.

    :param context: the request context
    :param admin: force use of the admin credentials
    :raises exception.Unauthorized: if neither a user token nor admin
        credentials are available
    """
    global _ADMIN_AUTH

    auth = None
    if admin or (context.is_admin and not context.auth_token):
        if not _ADMIN_AUTH:
            _ADMIN_AUTH = _load_auth_plugin(CONF)
        auth = _ADMIN_AUTH

    if not (context.auth_token or auth):
        # We did not get a user token and we should not be using
        # an admin token so log an error
        raise exception.Unauthorized()

    # When auth = None, the user auth will be extracted from the context.
    return service_auth.get_auth_plugin(context, user_auth=auth)

238 

239 

def _get_session():
    """Return the module-wide Keystone session, creating it on first use."""
    global _SESSION

    if not _SESSION:
        _SESSION = ks_loading.load_session_from_conf_options(
            CONF, nova.conf.neutron.NEUTRON_GROUP)

    return _SESSION

246 

247 

def get_client(context, admin=False):
    """Build a neutron client wrapped for nova-style exception handling.

    :param context: the request context the client acts on behalf of
    :param admin: whether to authenticate with the configured admin
        credentials instead of the context's token
    :returns: a ClientWrapper around a configured neutron client
    """
    auth_plugin = _get_auth_plugin(context, admin=admin)
    session = _get_session()

    # NOTE(efried): We build an adapter
    # to pull conf options
    # to pass to neutronclient
    # which uses them to build an Adapter.
    # This should be unwound at some point.
    adap = utils.get_ksa_adapter(
        'network', ksa_auth=auth_plugin, ksa_session=session)

    client_args = {
        'session': session,
        'auth': auth_plugin,
        'global_request_id': context.global_id,
        'connect_retries': CONF.neutron.http_retries,
        'service_type': adap.service_type,
        'service_name': adap.service_name,
        'interface': adap.interface,
        'region_name': adap.region_name,
        'endpoint_override': adap.endpoint_override,
    }

    return ClientWrapper(clientv20.Client(**client_args),
                         admin=admin or context.is_admin)

272 

273 

274def _is_not_duplicate(item, items, items_list_name, instance): 

275 present = item in items 

276 

277 # The expectation from this function's perspective is that the 

278 # item is not part of the items list so if it is part of it 

279 # we should at least log it as a warning 

280 if present: 

281 LOG.warning("%(item)s already exists in list: %(list_name)s " 

282 "containing: %(items)s. ignoring it", 

283 {'item': item, 

284 'list_name': items_list_name, 

285 'items': items}, 

286 instance=instance) 

287 

288 return not present 

289 

290 

def _ensure_no_port_binding_failure(port):
    """Raise PortBindingFailed if the port reports a failed vif binding."""
    vif_type = port.get('binding:vif_type')
    if vif_type == network_model.VIF_TYPE_BINDING_FAILED:
        raise exception.PortBindingFailed(port_id=port['id'])

295 

296 

297class API: 

298 """API for interacting with the neutron 2.x API.""" 

299 

    def __init__(self):
        # Timestamp of the last refresh of the neutron extension list;
        # None until the first sync.
        self.last_neutron_extension_sync = None
        # Cache of neutron extensions discovered at the last sync.
        self.extensions = {}
        # PCI device whitelist built from the [pci]device_spec option.
        self.pci_whitelist = pci_whitelist.Whitelist(
            CONF.pci.device_spec)

305 

306 def _update_port_with_migration_profile( 

307 self, instance, port_id, port_profile, admin_client): 

308 try: 

309 updated_port = admin_client.update_port( 

310 port_id, {'port': {constants.BINDING_PROFILE: port_profile}}) 

311 return updated_port 

312 except Exception as ex: 

313 with excutils.save_and_reraise_exception(): 

314 LOG.error("Unable to update binding profile " 

315 "for port: %(port)s due to failure: %(error)s", 

316 {'port': port_id, 'error': ex}, 

317 instance=instance) 

318 

319 def _clear_migration_port_profile( 

320 self, context, instance, admin_client, ports): 

321 for p in ports: 

322 # If the port already has a migration profile and if 

323 # it is to be torn down, then we need to clean up 

324 # the migration profile. 

325 port_profile = get_binding_profile(p) 

326 if not port_profile: 

327 continue 

328 if constants.MIGRATING_ATTR in port_profile: 328 ↛ 321line 328 didn't jump to line 321 because the condition on line 328 was always true

329 del port_profile[constants.MIGRATING_ATTR] 

330 LOG.debug("Removing port %s migration profile", p['id'], 

331 instance=instance) 

332 self._update_port_with_migration_profile( 

333 instance, p['id'], port_profile, admin_client) 

334 

    def _setup_migration_port_profile(
            self, context, instance, host, admin_client, ports):
        """Set the 'migrating_to' binding-profile attribute on the ports.

        Ports already bound to the destination host, or whose profile
        already points at it, are skipped.
        """
        # Migrating to a new host
        for p in ports:
            # If the host hasn't changed, there is nothing to do.
            # But if the destination host is different than the
            # current one, please update the port_profile with
            # the 'migrating_to'(constants.MIGRATING_ATTR) key pointing to
            # the given 'host'.
            host_id = p.get(constants.BINDING_HOST_ID)
            if host_id != host:
                port_profile = get_binding_profile(p)
                # If the "migrating_to" attribute already points at the given
                # host, then skip the port update call since we're not changing
                # anything.
                if host != port_profile.get(constants.MIGRATING_ATTR):
                    port_profile[constants.MIGRATING_ATTR] = host
                    self._update_port_with_migration_profile(
                        instance, p['id'], port_profile, admin_client)
                    LOG.debug("Port %(port_id)s updated with migration "
                              "profile %(profile_data)s successfully",
                              {'port_id': p['id'],
                               'profile_data': port_profile},
                              instance=instance)

359 

    def setup_networks_on_host(self, context, instance, host=None,
                               teardown=False):
        """Setup or teardown the network structures.

        :param context: The user request context.
        :param instance: The instance with attached ports.
        :param host: Optional host used to control the setup. If provided and
            is not the same as the current instance.host, this method assumes
            the instance is being migrated and sets the "migrating_to"
            attribute in the binding profile for the attached ports.
        :param teardown: Whether or not network information for the ports
            should be cleaned up. If True, at a minimum the "migrating_to"
            attribute is cleared in the binding profile for the ports. If a
            host is also provided, then port bindings for that host are
            deleted when teardown is True as long as the host does not match
            the current instance.host.
        :raises: nova.exception.PortBindingDeletionFailed if host is not None,
            teardown is True, and port binding deletion fails.
        """
        # Check if the instance is migrating to a new host.
        port_migrating = host and (instance.host != host)
        # If the port is migrating to a new host or if it is a
        # teardown on the original host, then proceed.
        if port_migrating or teardown:
            # Only consider ports owned by this instance's project and
            # currently bound to the instance's (source) host.
            search_opts = {'device_id': instance.uuid,
                           'tenant_id': instance.project_id,
                           constants.BINDING_HOST_ID: instance.host}
            # Now get the port details to process the ports
            # binding profile info.
            data = self.list_ports(context, **search_opts)
            ports = data['ports']
            admin_client = get_client(context, admin=True)
            if teardown:
                # Reset the port profile
                self._clear_migration_port_profile(
                    context, instance, admin_client, ports)
                # If a host was provided, delete any bindings between that
                # host and the ports as long as the host isn't the same as
                # the current instance.host.
                has_binding_ext = self.has_port_binding_extension(
                    client=admin_client)
                if port_migrating and has_binding_ext:
                    self._delete_port_bindings(context, ports, host)
            elif port_migrating:
                # Setup the port profile
                self._setup_migration_port_profile(
                    context, instance, host, admin_client, ports)

407 

408 def _delete_port_bindings(self, context, ports, host): 

409 """Attempt to delete all port bindings on the host. 

410 

411 :param context: The user request context. 

412 :param ports: list of port dicts to cleanup; the 'id' field is required 

413 per port dict in the list 

414 :param host: host from which to delete port bindings 

415 :raises: PortBindingDeletionFailed if port binding deletion fails. 

416 """ 

417 client = get_client(context, admin=True) 

418 failed_port_ids = [] 

419 

420 for port in ports: 

421 # This call is safe in that 404s for non-existing 

422 # bindings are ignored. 

423 try: 

424 client.delete_port_binding(port['id'], host) 

425 except neutron_client_exc.NeutronClientException as exc: 

426 # We can safely ignore 404s since we're trying to delete 

427 # the thing that wasn't found anyway, but for everything else 

428 # we should log an error 

429 if exc.status_code == 404: 429 ↛ 430line 429 didn't jump to line 430 because the condition on line 429 was never true

430 continue 

431 

432 failed_port_ids.append(port['id']) 

433 LOG.exception( 

434 "Failed to delete binding for port %(port_id)s on host " 

435 "%(host)s", {'port_id': port['id'], 'host': host}) 

436 

437 if failed_port_ids: 

438 raise exception.PortBindingDeletionFailed( 

439 port_id=','.join(failed_port_ids), host=host) 

440 

    def _get_available_networks(self, context, project_id,
                                net_ids=None, neutron=None,
                                auto_allocate=False):
        """Return a network list available for the tenant.

        The list contains networks owned by the tenant and public networks.
        If net_ids specified, it searches networks with requested IDs only.

        :param context: The user request context.
        :param project_id: The tenant whose private networks are listed.
        :param net_ids: Optional list of network IDs to restrict the search.
        :param neutron: Optional neutron client; one is created from the
            context when not supplied.
        :param auto_allocate: When True, only return networks with
            admin_state_up set, since auto-allocated topologies may expose
            partially built resources.
        :returns: list of network dicts from the neutron API.
        """
        if not neutron:
            neutron = get_client(context)

        if net_ids:
            # If user has specified to attach instance only to specific
            # networks then only add these to **search_opts. This search will
            # also include 'shared' networks.
            search_opts = {'id': net_ids}
            nets = neutron.list_networks(**search_opts).get('networks', [])
        else:
            # (1) Retrieve non-public network list owned by the tenant.
            search_opts = {'tenant_id': project_id, 'shared': False}
            if auto_allocate:
                # The auto-allocated-topology extension may create complex
                # network topologies and it does so in a non-transactional
                # fashion. Therefore API users may be exposed to resources that
                # are transient or partially built. A client should use
                # resources that are meant to be ready and this can be done by
                # checking their admin_state_up flag.
                search_opts['admin_state_up'] = True
            nets = neutron.list_networks(**search_opts).get('networks', [])
            # (2) Retrieve public network list.
            search_opts = {'shared': True}
            nets += neutron.list_networks(**search_opts).get('networks', [])

        # Keep the result in the same order the caller requested the
        # network IDs in.
        _ensure_requested_network_ordering(
            lambda x: x['id'],
            nets,
            net_ids)

        return nets

479 

    def _cleanup_created_port(self, port_client, port_id, instance):
        """Best-effort deletion of a port created earlier in the flow.

        Failures are logged but never raised, so cleanup cannot mask the
        error that triggered it.
        """
        try:
            port_client.delete_port(port_id)
        except neutron_client_exc.NeutronClientException:
            LOG.exception(
                'Failed to delete port %(port_id)s while cleaning up after an '
                'error.', {'port_id': port_id},
                instance=instance)

488 

    def _create_port_minimal(self, context, port_client, instance, network_id,
                             fixed_ip=None, security_group_ids=None):
        """Attempts to create a port for the instance on the given network.

        :param context: The request context.
        :param port_client: The client to use to create the port.
        :param instance: Create the port for the given instance.
        :param network_id: Create the port on the given network.
        :param fixed_ip: Optional fixed IP to use from the given network.
        :param security_group_ids: Optional list of security group IDs to
            apply to the port.
        :returns: The created port.
        :raises PortLimitExceeded: If neutron fails with an OverQuota error.
        :raises NoMoreFixedIps: If neutron fails with
            IpAddressGenerationFailure error.
        :raises: PortBindingFailed: If port binding failed.
        :raises NetworksWithQoSPolicyNotSupported: if the created port has
            resource request.
        :raises InvalidInput: if the requested fixed IP is not valid for the
            network.
        :raises FixedIpAlreadyInUse: if the requested fixed IP is already
            allocated.
        """
        # Set the device_id so it's clear who this port was created for,
        # and to stop other instances trying to use it
        port_req_body = {'port': {'device_id': instance.uuid}}
        try:
            if fixed_ip:
                port_req_body['port']['fixed_ips'] = [
                    {'ip_address': str(fixed_ip)}]
            port_req_body['port']['network_id'] = network_id
            port_req_body['port']['admin_state_up'] = True
            port_req_body['port']['tenant_id'] = instance.project_id
            if security_group_ids:
                port_req_body['port']['security_groups'] = security_group_ids

            port_response = port_client.create_port(port_req_body)

            port = port_response['port']
            port_id = port['id']

            # NOTE(gibi): Checking if the created port has resource request as
            # such ports are currently not supported as they would at least
            # need resource allocation manipulation in placement but might also
            # need a new scheduling if resource on this host is not available.
            if self._has_resource_request(context, port, port_client):
                msg = (
                    "The auto-created port %(port_id)s is being deleted due "
                    "to its network having QoS policy.")
                LOG.info(msg, {'port_id': port_id})
                self._cleanup_created_port(port_client, port_id, instance)
                # NOTE(gibi): This limitation regarding server create can be
                # removed when the port creation is moved to the conductor. But
                # this code also limits attaching a network that has QoS
                # minimum bandwidth rule.
                raise exception.NetworksWithQoSPolicyNotSupported(
                    instance_uuid=instance.uuid, network_id=network_id)
            try:
                _ensure_no_port_binding_failure(port)
            except exception.PortBindingFailed:
                # Delete the port we just created before re-raising, since
                # a port with a failed binding is unusable.
                with excutils.save_and_reraise_exception():
                    port_client.delete_port(port_id)

            LOG.debug('Successfully created port: %s', port_id,
                      instance=instance)
            return port
        except neutron_client_exc.InvalidIpForNetworkClient:
            LOG.warning('Neutron error: %(ip)s is not a valid IP address '
                        'for network %(network_id)s.',
                        {'ip': fixed_ip, 'network_id': network_id},
                        instance=instance)
            msg = (_('Fixed IP %(ip)s is not a valid ip address for '
                     'network %(network_id)s.') %
                   {'ip': fixed_ip, 'network_id': network_id})
            raise exception.InvalidInput(reason=msg)
        except (neutron_client_exc.IpAddressInUseClient,
                neutron_client_exc.IpAddressAlreadyAllocatedClient):
            LOG.warning('Neutron error: Fixed IP %s is '
                        'already in use.', fixed_ip, instance=instance)
            msg = _("Fixed IP %s is already in use.") % fixed_ip
            raise exception.FixedIpAlreadyInUse(message=msg)
        except neutron_client_exc.OverQuotaClient:
            LOG.warning(
                'Neutron error: Port quota exceeded in tenant: %s',
                port_req_body['port']['tenant_id'], instance=instance)
            raise exception.PortLimitExceeded()
        except neutron_client_exc.IpAddressGenerationFailureClient:
            LOG.warning('Neutron error: No more fixed IPs in network: %s',
                        network_id, instance=instance)
            raise exception.NoMoreFixedIps(net=network_id)
        except neutron_client_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception('Neutron error creating port on network %s',
                              network_id, instance=instance)

579 

    def _update_port(self, port_client, instance, port_id,
                     port_req_body):
        """Update a port, translating common neutron client errors.

        :param port_client: The client to use to update the port.
        :param instance: The instance the port belongs to (for logging).
        :param port_id: The ID of the port to update.
        :param port_req_body: The neutron port update request body.
        :returns: The updated port dict from the neutron response.
        :raises PortInUse: if the requested MAC address is already in use
            on the network.
        :raises FixedIpInvalidOnHost: if the port's fixed_ips are in a
            segment incompatible with the target host.
        :raises PortBindingFailed: if the updated port reports a failed
            binding.
        """
        try:
            port_response = port_client.update_port(port_id, port_req_body)
            port = port_response['port']
            _ensure_no_port_binding_failure(port)
            LOG.debug('Successfully updated port: %s', port_id,
                      instance=instance)
            return port
        except neutron_client_exc.MacAddressInUseClient:
            mac_address = port_req_body['port'].get('mac_address')
            network_id = port_req_body['port'].get('network_id')
            LOG.warning('Neutron error: MAC address %(mac)s is already '
                        'in use on network %(network)s.',
                        {'mac': mac_address, 'network': network_id},
                        instance=instance)
            # NOTE(review): the MAC address is passed as the port_id kwarg
            # here — looks odd but preserved as existing behavior; confirm
            # before changing.
            raise exception.PortInUse(port_id=mac_address)
        except neutron_client_exc.HostNotCompatibleWithFixedIpsClient:
            network_id = port_req_body['port'].get('network_id')
            LOG.warning('Neutron error: Tried to bind a port with '
                        'fixed_ips to a host in the wrong segment on '
                        'network %(network)s.',
                        {'network': network_id}, instance=instance)
            raise exception.FixedIpInvalidOnHost(port_id=port_id)

604 

605 def _check_external_network_attach(self, context, nets): 

606 """Check if attaching to external network is permitted.""" 

607 if not context.can(servers_policies.NETWORK_ATTACH_EXTERNAL, 

608 fatal=False): 

609 for net in nets: 609 ↛ exitline 609 didn't return from function '_check_external_network_attach' because the loop on line 609 didn't complete

610 # Perform this check here rather than in validate_networks to 

611 # ensure the check is performed every time 

612 # allocate_for_instance is invoked 

613 if net.get('router:external') and not net.get('shared'): 613 ↛ 609line 613 didn't jump to line 609 because the condition on line 613 was always true

614 raise exception.ExternalNetworkAttachForbidden( 

615 network_uuid=net['id']) 

616 

617 def unbind_ports(self, context, ports, detach=True): 

618 """Unbind and detach the given ports by clearing their 

619 device_owner and dns_name. 

620 The device_id will also be cleaned if detach=True. 

621 

622 :param context: The request context. 

623 :param ports: list of port IDs. 

624 """ 

625 neutron = get_client(context) 

626 self._unbind_ports(context, ports, neutron, detach=detach) 

627 

    def _unbind_ports(self, context, ports,
                      neutron, port_client=None, detach=True):
        """Unbind and detach the given ports by clearing their
        device_owner and dns_name.
        The device_id will also be cleaned if detach=True.

        :param context: The request context.
        :param ports: list of port IDs.
        :param neutron: neutron client for the current context.
        :param port_client: The client with appropriate karma for
            updating the ports.
        :param detach: Whether to also clear device_id and device_owner.
        """
        if port_client is None:
            # Requires admin creds to set port bindings
            port_client = get_client(context, admin=True)

        # it is a dict of network dicts as returned by the neutron client keyed
        # by network UUID
        networks: ty.Dict[str, ty.Dict] = {}
        for port_id in ports:
            # A port_id is optional in the NetworkRequest object so check here
            # in case the caller forgot to filter the list.
            if port_id is None:
                continue

            port_req_body: ty.Dict[str, ty.Any] = {
                'port': {
                    constants.BINDING_HOST_ID: None,
                }
            }
            if detach:
                port_req_body['port']['device_id'] = ''
                port_req_body['port']['device_owner'] = ''
            try:
                port = self._show_port(
                    context, port_id, neutron_client=neutron,
                    fields=[constants.BINDING_PROFILE, 'network_id'])
            except exception.PortNotFound:
                LOG.debug('Unable to show port %s as it no longer '
                          'exists.', port_id)
                continue
            except Exception:
                # NOTE: In case we can't retrieve the binding:profile or
                # network info assume that they are empty
                LOG.exception("Unable to get binding:profile for port '%s'",
                              port_id)
                port_profile = {}
                network: dict = {}
            else:
                port_profile = get_binding_profile(port)
                net_id = port.get('network_id')
                if net_id in networks:
                    network = networks[net_id]
                else:
                    # Cache network lookups so each network is fetched once
                    # even when many ports share it.
                    network = neutron.show_network(net_id,
                                                   fields=['dns_domain']
                                                   ).get('network')
                    networks[net_id] = network

            # Unbind Port device
            if port_profile.get('arq_uuid'):
                # Delete device profile by arq uuid.
                cyclient = cyborg.get_client(context)
                cyclient.delete_arqs_by_uuid([port_profile['arq_uuid']])
                LOG.debug('Delete ARQs %s for port %s',
                          port_profile['arq_uuid'], port_id)

            # NOTE: We're doing this to remove the binding information
            # for the physical device but don't want to overwrite the other
            # information in the binding profile.
            for profile_key in ('pci_vendor_info', 'pci_slot',
                                constants.ALLOCATION, 'arq_uuid',
                                'physical_network', 'card_serial_number',
                                'vf_num', 'pf_mac_address',
                                'device_mac_address'):
                if profile_key in port_profile:
                    del port_profile[profile_key]
            port_req_body['port'][constants.BINDING_PROFILE] = port_profile

            # NOTE: For internal DNS integration (network does not have a
            # dns_domain), or if we cannot retrieve network info, we use the
            # admin client to reset dns_name.
            if (
                self.has_dns_extension(client=port_client) and
                not network.get('dns_domain')
            ):
                port_req_body['port']['dns_name'] = ''

            try:
                port_client.update_port(port_id, port_req_body)
            except neutron_client_exc.PortNotFoundClient:
                LOG.debug('Unable to unbind port %s as it no longer '
                          'exists.', port_id)
            except Exception:
                LOG.exception("Unable to clear device ID for port '%s'",
                              port_id)
            # NOTE: For external DNS integration, we use the neutron client
            # with user's context to reset the dns_name since the recordset is
            # under user's zone.
            self._reset_port_dns_name(network, port_id, neutron)

728 

    def _validate_requested_port_ids(self, context, instance, neutron,
                                     requested_networks):
        """Processes and validates requested networks for allocation.

        Iterates over the list of NetworkRequest objects, validating the
        request and building sets of ports and networks to
        use for allocating ports for the instance.

        :param context: The user request context.
        :type context: nova.context.RequestContext
        :param instance: allocate networks on this instance
        :type instance: nova.objects.Instance
        :param neutron: neutron client session
        :type neutron: neutronclient.v2_0.client.Client
        :param requested_networks: List of user-requested networks and/or ports
        :type requested_networks: nova.objects.NetworkRequestList
        :returns: tuple of:
            - ports: dict mapping of port id to port dict
            - ordered_networks: list of nova.objects.NetworkRequest objects
                for requested networks (either via explicit network request
                or the network for an explicit port request)
        :raises nova.exception.PortNotFound: If a requested port is not found
            in Neutron.
        :raises nova.exception.PortNotUsable: If a requested port is not owned
            by the same tenant that the instance is created under.
        :raises nova.exception.PortInUse: If a requested port is already
            attached to another instance.
        :raises nova.exception.PortNotUsableDNS: If a requested port has a
            value assigned to its dns_name attribute.
        """
        ports = {}
        ordered_networks = []
        # If we're asked to auto-allocate the network then there won't be any
        # ports or real neutron networks to lookup, so just return empty
        # results.
        if requested_networks and not requested_networks.auto_allocate:
            for request in requested_networks:

                # Process a request to use a pre-existing neutron port.
                if request.port_id:
                    # Make sure the port exists.
                    port = self._show_port(context, request.port_id,
                                           neutron_client=neutron)
                    # Make sure the instance has access to the port.
                    if port['tenant_id'] != instance.project_id:
                        raise exception.PortNotUsable(port_id=request.port_id,
                                                      instance=instance.uuid)

                    # Make sure the port isn't already attached to another
                    # instance.
                    if port.get('device_id'):
                        raise exception.PortInUse(port_id=request.port_id)

                    # Make sure that if the user assigned a value to the port's
                    # dns_name attribute, it is equal to the instance's
                    # hostname
                    if port.get('dns_name'):
                        if port['dns_name'] != instance.hostname:
                            raise exception.PortNotUsableDNS(
                                port_id=request.port_id,
                                instance=instance.uuid, value=port['dns_name'],
                                hostname=instance.hostname)

                    # Make sure the port is usable
                    _ensure_no_port_binding_failure(port)

                    # If requesting a specific port, automatically process
                    # the network for that port as if it were explicitly
                    # requested.
                    request.network_id = port['network_id']
                    ports[request.port_id] = port

                # Process a request to use a specific neutron network.
                if request.network_id:
                    ordered_networks.append(request)

        return ports, ordered_networks

806 

807 def _clean_security_groups(self, security_groups): 

808 """Cleans security groups requested from Nova API 

809 

810 Neutron already passes a 'default' security group when 

811 creating ports so it's not necessary to specify it to the 

812 request. 

813 """ 

814 if not security_groups: 

815 security_groups = [] 

816 elif security_groups == [constants.DEFAULT_SECGROUP]: 

817 security_groups = [] 

818 return security_groups 

819 

820 def _get_security_group_ids(self, security_groups, user_security_groups): 

821 """Processes requested security groups based on existing user groups 

822 

823 :param security_groups: list of security group names or IDs 

824 :param user_security_groups: list of Neutron security groups found 

825 :return: list of security group IDs 

826 :raises nova.exception.NoUniqueMatch: If multiple security groups 

827 are requested with the same name. 

828 :raises nova.exception.SecurityGroupNotFound: If a given security group 

829 is not found. 

830 """ 

831 # Initialize two dictionaries to map security group names and IDs to 

832 # their corresponding IDs 

833 name_to_id = {} 

834 # NOTE(sean-k-mooney): using a dict here instead of a set is faster 

835 # probably due to l1 code cache misses due to the introduction 

836 # of set lookup in addition to dict lookups making the branch 

837 # prediction for the second for loop less reliable. 

838 id_to_id = {} 

839 

840 # Populate the dictionaries with user security groups 

841 for user_security_group in user_security_groups: 

842 name = user_security_group['name'] 

843 sg_id = user_security_group['id'] 

844 

845 # Check for duplicate names and raise an exception if found 

846 if name in name_to_id: 

847 raise exception.NoUniqueMatch( 

848 _("Multiple security groups found matching" 

849 " '%s'. Use an ID to be more specific.") % name) 

850 # Map the name to its corresponding ID 

851 name_to_id[name] = sg_id 

852 # Map the ID to itself for easy lookup 

853 id_to_id[sg_id] = sg_id 

854 

855 # Initialize an empty list to store the resulting security group IDs 

856 security_group_ids = [] 

857 

858 # Iterate over the requested security groups 

859 for security_group in security_groups: 

860 # Check if the security group is in the name-to-ID dictionary 

861 # as if a user names the security group the same as 

862 # another's security groups uuid, the name takes priority. 

863 if security_group in name_to_id: 

864 security_group_ids.append(name_to_id[security_group]) 

865 # Check if the security group is in the ID-to-ID dictionary 

866 elif security_group in id_to_id: 

867 security_group_ids.append(id_to_id[security_group]) 

868 # Raise an exception if the security group is not found in 

869 # either dictionary 

870 else: 

871 raise exception.SecurityGroupNotFound( 

872 security_group_id=security_group) 

873 

874 # Return the list of security group IDs 

875 return security_group_ids 

876 

877 def _process_security_groups(self, instance, neutron, security_groups): 

878 """Processes and validates requested security groups for allocation. 

879 

880 Iterates over the list of requested security groups, validating the 

881 request and filtering out the list of security group IDs to use for 

882 port allocation. 

883 

884 :param instance: allocate networks on this instance 

885 :type instance: nova.objects.Instance 

886 :param neutron: neutron client session 

887 :type neutron: neutronclient.v2_0.client.Client 

888 :param security_groups: list of requested security group name or IDs 

889 to use when allocating new ports for the instance 

890 :return: list of security group IDs to use when allocating new ports 

891 :raises nova.exception.NoUniqueMatch: If multiple security groups 

892 are requested with the same name. 

893 :raises nova.exception.SecurityGroupNotFound: If a requested security 

894 group is not in the tenant-filtered list of available security 

895 groups in Neutron. 

896 """ 

897 security_group_ids = [] 

898 # TODO(arosen) Should optimize more to do direct query for security 

899 # group if len(security_groups) == 1 

900 if len(security_groups): 

901 # NOTE(slaweq): fields other than name and id aren't really needed 

902 # so asking only about those fields will allow Neutron to not 

903 # prepare list of rules for each found security group. That may 

904 # speed processing of this request a lot in case when tenant has 

905 # got many security groups 

906 sg_fields = ['id', 'name'] 

907 search_opts = {'tenant_id': instance.project_id} 

908 sg_filter_ext = self.has_sg_shared_filter_extension(client=neutron) 

909 user_security_groups = neutron.list_security_groups( 

910 fields=sg_fields, **search_opts).get('security_groups') 

911 

912 try: 

913 security_group_ids = self._get_security_group_ids( 

914 security_groups, user_security_groups) 

915 except exception.SecurityGroupNotFound: 

916 # Trigger a raise if the shared filter extension is not loaded, 

917 # else we will trigger on the second call below when we pass 

918 # any shared security groups. 

919 if not sg_filter_ext: 919 ↛ 920line 919 didn't jump to line 920 because the condition on line 919 was never true

920 raise 

921 

922 # NOTE(hangyang): Make another request to get the RBAC shared 

923 # SGs accessible to the tenant 

924 search_opts = {'shared': True} 

925 user_security_groups += neutron.list_security_groups( 

926 fields=sg_fields, **search_opts).get('security_groups') 

927 

928 security_group_ids = self._get_security_group_ids( 

929 security_groups, user_security_groups) 

930 

931 return security_group_ids 

932 

    def _validate_requested_network_ids(self, context, instance, neutron,
                                        requested_networks, ordered_networks):
        """Check requested networks using the Neutron API.

        Check the user has access to the network they requested, and that
        it is a suitable network to connect to. This includes getting the
        network details for any ports that have been passed in, because the
        request will have been updated with the network_id in
        _validate_requested_port_ids.

        If the user has not requested any ports or any networks, we get back
        a full list of networks the user has access to, and if there is only
        one network, we update ordered_networks so we will connect the
        instance to that network.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param neutron: neutron client
        :param requested_networks: nova.objects.NetworkRequestList, list of
            user-requested networks and/or ports; may be empty
        :param ordered_networks: output from _validate_requested_port_ids
            that will be used to create and update ports
        :returns: dict, keyed by network ID, of networks to use
        :raises InterfaceAttachFailedNoNetwork: If no specific networks were
            requested and none are available.
        :raises NetworkAmbiguous: If no specific networks were requested but
            more than one is available.
        :raises ExternalNetworkAttachForbidden: If the policy rules forbid
            the request context from using an external non-shared network but
            one was requested (or available).
        """

        # Get networks from Neutron
        # If net_ids is empty, this actually returns all available nets
        auto_allocate = requested_networks and requested_networks.auto_allocate
        net_ids = [request.network_id for request in ordered_networks]
        nets = self._get_available_networks(context, instance.project_id,
                                            net_ids, neutron=neutron,
                                            auto_allocate=auto_allocate)
        if not nets:

            if requested_networks:
                # There are no networks available for the project to use and
                # none specifically requested, so check to see if we're asked
                # to auto-allocate the network.
                if auto_allocate:
                    # During validate_networks we checked to see if
                    # auto-allocation is available so we don't need to do that
                    # again here.
                    nets = [self._auto_allocate_network(instance, neutron)]
                else:
                    # NOTE(chaochin): If user specifies a network id and the
                    # network can not be found, raise NetworkNotFound error.
                    for request in requested_networks:
                        if not request.port_id and request.network_id:
                            raise exception.NetworkNotFound(
                                network_id=request.network_id)
            else:
                # no requested nets and user has no available nets
                return {}

        # if this function is directly called without a requested_network param
        if (not requested_networks or
            requested_networks.is_single_unspecified or
            requested_networks.auto_allocate):
            # If no networks were requested and none are available, consider
            # it a bad request.
            if not nets:
                raise exception.InterfaceAttachFailedNoNetwork(
                    project_id=instance.project_id)
            # bug/1267723 - if no network is requested and more
            # than one is available then raise NetworkAmbiguous Exception
            if len(nets) > 1:
                msg = _("Multiple possible networks found, use a Network "
                        "ID to be more specific.")
                raise exception.NetworkAmbiguous(msg)
            ordered_networks.append(
                objects.NetworkRequest(network_id=nets[0]['id']))

        # NOTE(melwitt): check external net attach permission after the
        # check for ambiguity, there could be another
        # available net which is permitted bug/1364344
        self._check_external_network_attach(context, nets)

        return {net['id']: net for net in nets}

1018 

    def _create_ports_for_instance(self, context, instance, ordered_networks,
                                   nets, neutron, security_group_ids):
        """Create port for network_requests that don't have a port_id

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param ordered_networks: objects.NetworkRequestList in requested order
        :param nets: a dict of network_id to networks returned from neutron
        :param neutron: neutronclient built from users request context
        :param security_group_ids: a list of security group IDs to be applied
            to any ports created
        :returns a list of pairs (NetworkRequest, created_port_uuid); note that
            created_port_uuid will be None for the pair where a pre-existing
            port was part of the user request
        :raises nova.exception.SecurityGroupCannotBeApplied: If security
            groups cannot be applied to a requested network (no subnets, or
            port security disabled on the network).
        """
        created_port_ids = []
        requests_and_created_ports = []
        for request in ordered_networks:
            network = nets.get(request.network_id)
            # if network_id did not pass validate_networks() and not available
            # here then skip it safely not continuing with a None Network
            if not network:
                continue

            try:
                port_security_enabled = network.get(
                    'port_security_enabled', True)
                if port_security_enabled:
                    if not network.get('subnets'):
                        # Neutron can't apply security groups to a port
                        # for a network without L3 assignments.
                        LOG.debug('Network with port security enabled does '
                                  'not have subnets so security groups '
                                  'cannot be applied: %s',
                                  network, instance=instance)
                        raise exception.SecurityGroupCannotBeApplied()
                else:
                    if security_group_ids:
                        # We don't want to apply security groups on port
                        # for a network defined with
                        # 'port_security_enabled=False'.
                        LOG.debug('Network has port security disabled so '
                                  'security groups cannot be applied: %s',
                                  network, instance=instance)
                        raise exception.SecurityGroupCannotBeApplied()

                created_port_id = None
                if not request.port_id:
                    # create minimal port, if port not already created by user
                    created_port = self._create_port_minimal(
                        context, neutron, instance, request.network_id,
                        request.address, security_group_ids)
                    created_port_id = created_port['id']
                    created_port_ids.append(created_port_id)

                requests_and_created_ports.append((
                    request, created_port_id))

            except Exception:
                # Roll back every port created so far in this loop before
                # re-raising the original exception.
                with excutils.save_and_reraise_exception():
                    if created_port_ids:
                        self._delete_ports(
                            neutron, instance, created_port_ids)

        return requests_and_created_ports

1084 

1085 def _has_resource_request(self, context, port, neutron): 

1086 resource_request = port.get(constants.RESOURCE_REQUEST) or {} 

1087 if self.has_extended_resource_request_extension(context, neutron): 

1088 return bool(resource_request.get(constants.REQUEST_GROUPS, [])) 

1089 else: 

1090 return bool(resource_request) 

1091 

1092 def instance_has_extended_resource_request(self, instance_uuid): 

1093 # NOTE(gibi): We need to use an admin context to query neutron ports as 

1094 # neutron does not fill the resource_request field in the port response 

1095 # if we query with a non admin context. 

1096 admin_context = nova_context.get_admin_context() 

1097 

1098 if not self.has_extended_resource_request_extension(admin_context): 

1099 # Short circuit if the extended resource request API extension is 

1100 # not available 

1101 return False 

1102 

1103 # So neutron supports the extended resource request but does the 

1104 # instance has a port with such request 

1105 search_opts = {'device_id': instance_uuid, 

1106 'fields': [constants.RESOURCE_REQUEST]} 

1107 ports = self.list_ports( 

1108 admin_context, **search_opts).get('ports', []) 

1109 

1110 for port in ports: 

1111 resource_request = port.get(constants.RESOURCE_REQUEST) or {} 

1112 if resource_request.get(constants.REQUEST_GROUPS, []): 

1113 return True 

1114 return False 

1115 

1116 def get_binding_profile_allocation( 

1117 self, 

1118 context: nova_context.RequestContext, 

1119 port_id: str, 

1120 resource_provider_mapping: ty.Dict[str, ty.List[str]], 

1121 ) -> ty.Union[None, str, ty.Dict[str, str]]: 

1122 """Calculate the value of the allocation key of the binding:profile 

1123 based on the allocated resources. 

1124 

1125 :param context: the request context 

1126 :param port_id: the uuid of the neutron port 

1127 :param resource_provider_mapping: the mapping returned by the placement 

1128 defining which request group get allocated from which resource 

1129 providers 

1130 :returns: None if the port has no resource request. Returns a single 

1131 RP UUID if the port has a legacy resource request. Returns a dict 

1132 of request group id: resource provider UUID mapping if the port has 

1133 an extended resource request. 

1134 """ 

1135 # We need to use an admin client as the port.resource_request is admin 

1136 # only 

1137 neutron_admin = get_client(context, admin=True) 

1138 neutron = get_client(context) 

1139 port = self._show_port(context, port_id, neutron_client=neutron_admin) 

1140 if self._has_resource_request(context, port, neutron): 

1141 return self._get_binding_profile_allocation( 

1142 context, port, neutron, resource_provider_mapping) 

1143 else: 

1144 return None 

1145 

1146 def _get_binding_profile_allocation( 

1147 self, context, port, neutron, resource_provider_mapping 

1148 ): 

1149 # TODO(gibi): remove this condition and the else branch once Nova does 

1150 # not need to support old Neutron sending the legacy resource request 

1151 # extension 

1152 if self.has_extended_resource_request_extension( 

1153 context, neutron 

1154 ): 

1155 # The extended resource request format also means that a 

1156 # port has more than a one request groups 

1157 request_groups = port.get( 

1158 constants.RESOURCE_REQUEST, {}).get( 

1159 constants.REQUEST_GROUPS, []) 

1160 # Each request group id from the port needs to be mapped to 

1161 # a single provider id from the provider mappings. Each 

1162 # group from the port is mapped to a numbered request group 

1163 # in placement so we can assume that they are mapped to 

1164 # a single provider and therefore the provider mapping list 

1165 # has a single provider id. 

1166 allocation = { 

1167 group['id']: resource_provider_mapping[group['id']][0] 

1168 for group in request_groups 

1169 } 

1170 else: 

1171 # This is the legacy resource request format where a port 

1172 # is mapped to a single request group 

1173 # NOTE(gibi): In the resource provider mapping there can be 

1174 # more than one RP fulfilling a request group. But resource 

1175 # requests of a Neutron port is always mapped to a 

1176 # numbered request group that is always fulfilled by one 

1177 # resource provider. So we only pass that single RP UUID 

1178 # here. 

1179 allocation = resource_provider_mapping[ 

1180 port['id']][0] 

1181 

1182 return allocation 

1183 

    def allocate_for_instance(self, context, instance,
                              requested_networks,
                              security_groups=None, bind_host_id=None,
                              resource_provider_mapping=None,
                              network_arqs=None):
        """Allocate network resources for the instance.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param requested_networks: objects.NetworkRequestList object.
        :param security_groups: None or security groups to allocate for
            instance.
        :param bind_host_id: the host ID to attach to the ports being created.
        :param resource_provider_mapping: a dict keyed by ids of the entities
            (for example Neutron port) requesting resources for this instance
            mapped to a list of resource provider UUIDs that are fulfilling
            such a resource request.
        :param network_arqs: dict keyed by arq uuid, of ARQs allocated to
            ports.
        :returns: network info as from get_instance_nw_info()
        :raises nova.exception.InvalidInput: If the instance has no
            project_id.
        """
        LOG.debug('allocate_for_instance()', instance=instance)
        if not instance.project_id:
            msg = _('empty project id for instance %s')
            raise exception.InvalidInput(
                reason=msg % instance.uuid)

        # We do not want to create a new neutron session for each call
        neutron = get_client(context)

        # We always need admin_client to build nw_info,
        # we sometimes need it when updating ports
        admin_client = get_client(context, admin=True)

        #
        # Validate ports and networks with neutron. The requested_ports_dict
        # variable is a dict, keyed by port ID, of ports that were on the user
        # request and may be empty. The ordered_networks variable is a list of
        # NetworkRequest objects for any networks or ports specifically
        # requested by the user, which again may be empty.
        #

        # NOTE(gibi): we use the admin_client here to ensure that the returned
        # ports has the resource_request attribute filled as later we use this
        # information to decide when to add allocation key to the port binding.
        # See bug 1849657.
        requested_ports_dict, ordered_networks = (
            self._validate_requested_port_ids(
                context, instance, admin_client, requested_networks))

        nets = self._validate_requested_network_ids(
            context, instance, neutron, requested_networks, ordered_networks)
        if not nets:
            LOG.debug("No network configured", instance=instance)
            return network_model.NetworkInfo([])

        # Validate requested security groups
        security_groups = self._clean_security_groups(security_groups)
        security_group_ids = self._process_security_groups(
            instance, neutron, security_groups)

        # Tell Neutron which resource provider fulfills the ports' resource
        # request.
        # We only consider pre-created ports here as ports created
        # below based on requested networks are not scheduled to have their
        # resource request fulfilled.
        for port in requested_ports_dict.values():
            # only communicate the allocations if the port has resource
            # requests
            if self._has_resource_request(context, port, neutron):

                profile = get_binding_profile(port)
                profile[constants.ALLOCATION] = (
                    self._get_binding_profile_allocation(
                        context, port, neutron, resource_provider_mapping))
                port[constants.BINDING_PROFILE] = profile

        # Create ports from the list of ordered_networks. The returned
        # requests_and_created_ports variable is a list of 2-item tuples of
        # the form (NetworkRequest, created_port_id). Note that a tuple pair
        # will have None for the created_port_id if the NetworkRequest already
        # contains a port_id, meaning the user requested a specific
        # pre-existing port so one wasn't created here. The ports will be
        # updated later in _update_ports_for_instance to be bound to the
        # instance and compute host.
        requests_and_created_ports = self._create_ports_for_instance(
            context, instance, ordered_networks, nets, neutron,
            security_group_ids)

        #
        # Update existing and newly created ports
        #

        ordered_nets, ordered_port_ids, preexisting_port_ids, \
            created_port_ids = self._update_ports_for_instance(
                context, instance,
                neutron, admin_client, requests_and_created_ports, nets,
                bind_host_id, requested_ports_dict, network_arqs)

        #
        # Perform a full update of the network_info_cache,
        # including re-fetching lots of the required data from neutron
        #
        nw_info = self.get_instance_nw_info(
            context, instance, networks=ordered_nets,
            port_ids=ordered_port_ids,
            admin_client=admin_client,
            preexisting_port_ids=preexisting_port_ids)
        # Only return info about ports we processed in this run, which might
        # have been pre-existing neutron ports or ones that nova created. In
        # the initial allocation case (server create), this will be everything
        # we processed, and in later runs will only be what was processed that
        # time. For example, if the instance was created with port A and
        # then port B was attached in this call, only port B would be returned.
        # Thus, this filtering only affects the attach case.
        return network_model.NetworkInfo([vif for vif in nw_info
                                          if vif['id'] in created_port_ids +
                                          preexisting_port_ids])

1302 

    def _update_ports_for_instance(self, context, instance, neutron,
            admin_client, requests_and_created_ports, nets,
            bind_host_id, requested_ports_dict, network_arqs):
        """Update ports from network_requests.

        Updates the pre-existing ports and the ones created in
        ``_create_ports_for_instance`` with ``device_id``, ``device_owner``,
        optionally ``mac_address`` and, depending on the
        loaded extensions, ``rxtx_factor``, ``binding:host_id``, ``dns_name``.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param neutron: client using user context
        :param admin_client: client using admin context
        :param requests_and_created_ports: [(NetworkRequest, created_port_id)];
            Note that created_port_id will be None for any user-requested
            pre-existing port.
        :param nets: a dict of network_id to networks returned from neutron
        :param bind_host_id: a string for port['binding:host_id']
        :param requested_ports_dict: dict, keyed by port ID, of ports requested
            by the user
        :param network_arqs: dict keyed by arq uuid, of ARQs allocated to
            ports.
        :returns: tuple with the following::

            * list of network dicts in their requested order
            * list of port IDs in their requested order - note that does not
              mean the port was requested by the user, it could be a port
              created on a network requested by the user
            * list of pre-existing port IDs requested by the user
            * list of created port IDs
        """

        # We currently require admin creds to set port bindings.
        port_client = admin_client

        preexisting_port_ids = []
        created_port_ids = []
        ports_in_requested_order = []
        nets_in_requested_order = []
        created_vifs = []  # this list is for cleanups if we fail
        for request, created_port_id in requests_and_created_ports:
            vifobj = objects.VirtualInterface(context)
            vifobj.instance_uuid = instance.uuid
            vifobj.tag = request.tag if 'tag' in request else None

            network = nets.get(request.network_id)
            # if network_id did not pass validate_networks() and not available
            # here then skip it safely not continuing with a None Network
            if not network:
                continue

            nets_in_requested_order.append(network)

            zone = 'compute:%s' % instance.availability_zone
            port_req_body = {'port': {'device_id': instance.uuid,
                                      'device_owner': zone}}
            # Carry over any binding profile the user supplied on a
            # pre-existing port so the update does not clobber it.
            if (requested_ports_dict and
                    request.port_id in requested_ports_dict and
                    get_binding_profile(requested_ports_dict[request.port_id])):
                port_req_body['port'][constants.BINDING_PROFILE] = \
                    get_binding_profile(requested_ports_dict[request.port_id])
            try:
                port_arq = None
                if network_arqs:
                    port_arq = network_arqs.get(request.arq_uuid, None)
                self._populate_neutron_extension_values(
                    context, instance, request.pci_request_id, port_req_body,
                    network=network, neutron=neutron,
                    bind_host_id=bind_host_id,
                    port_arq=port_arq)
                # NOTE(gibi): Remove this once we are sure that the fix for
                # bug 1942329 is always present in the deployed neutron. The
                # _populate_neutron_extension_values() call above already
                # populated this MAC to the binding profile instead.
                self._populate_pci_mac_address(instance,
                    request.pci_request_id, port_req_body)

                if created_port_id:
                    port_id = created_port_id
                    created_port_ids.append(port_id)
                else:
                    port_id = request.port_id
                ports_in_requested_order.append(port_id)

                # After port is created, update other bits
                updated_port = self._update_port(
                    port_client, instance, port_id, port_req_body)

                # NOTE(danms): The virtual_interfaces table enforces global
                # uniqueness on MAC addresses, which clearly does not match
                # with neutron's view of the world. Since address is a 255-char
                # string we can namespace it with our port id. Using '/' should
                # be safely excluded from MAC address notations as well as
                # UUIDs. We can stop doing this now that we've removed
                # nova-network, but we need to leave the read translation in
                # for longer than that of course.
                vifobj.address = '%s/%s' % (updated_port['mac_address'],
                                            updated_port['id'])
                vifobj.uuid = port_id
                vifobj.create()
                created_vifs.append(vifobj)

                if not created_port_id:
                    # only add if update worked and port create not called
                    preexisting_port_ids.append(port_id)

                self._update_port_dns_name(context, instance, network,
                                           ports_in_requested_order[-1],
                                           neutron)
            except Exception:
                # Roll back everything done so far: unbind pre-existing
                # ports, delete created ports and destroy the VIF records,
                # then re-raise the original exception.
                with excutils.save_and_reraise_exception():
                    self._unbind_ports(context,
                                       preexisting_port_ids,
                                       neutron, port_client)
                    self._delete_ports(neutron, instance, created_port_ids)
                    for vif in created_vifs:
                        vif.destroy()

        return (nets_in_requested_order, ports_in_requested_order,
                preexisting_port_ids, created_port_ids)

1424 

1425 def _refresh_neutron_extensions_cache(self, client): 

1426 """Refresh the neutron extensions cache when necessary.""" 

1427 if (not self.last_neutron_extension_sync or 

1428 ((time.time() - self.last_neutron_extension_sync) >= 

1429 CONF.neutron.extension_sync_interval)): 

1430 extensions_list = client.list_extensions()['extensions'] 

1431 self.last_neutron_extension_sync = time.time() 

1432 self.extensions.clear() 

1433 self.extensions = {ext['alias']: ext for ext in extensions_list} 

1434 

1435 def _has_extension(self, extension, context=None, client=None): 

1436 """Check if the provided neutron extension is enabled. 

1437 

1438 :param extension: The alias of the extension to check 

1439 :param client: keystoneauth1.adapter.Adapter 

1440 :param context: nova.context.RequestContext 

1441 :returns: True if the neutron extension is available, else False 

1442 """ 

1443 if client is None: 

1444 client = get_client(context) 

1445 

1446 self._refresh_neutron_extensions_cache(client) 

1447 return extension in self.extensions 

1448 

1449 def has_multi_provider_extension(self, context=None, client=None): 

1450 """Check if the 'multi-provider' extension is enabled. 

1451 

1452 This extension allows administrative users to define multiple physical 

1453 bindings for a logical network. 

1454 """ 

1455 return self._has_extension(constants.MULTI_PROVIDER, context, client) 

1456 

1457 def has_dns_extension(self, context=None, client=None): 

1458 """Check if the 'dns-integration' extension is enabled. 

1459 

1460 This extension adds the 'dns_name' and 'dns_assignment' attributes to 

1461 port resources. 

1462 """ 

1463 return self._has_extension(constants.DNS_INTEGRATION, context, client) 

1464 

1465 def has_sg_shared_filter_extension(self, context=None, client=None): 

1466 """Check if the 'security-groups-shared-filtering' extension is 

1467 enabled. 

1468 

1469 This extension adds a 'shared' filter to security group APIs. 

1470 """ 

1471 return self._has_extension(constants.SG_SHARED_FILTER, context, client) 

1472 

1473 # TODO(gibi): Remove all branches where this is False after Neutron made 

1474 # the this extension mandatory. In Xena this extension will be optional to 

1475 # support the scenario where Neutron upgraded first. So Neutron can mark 

1476 # this mandatory earliest in Yoga. 

    def has_extended_resource_request_extension(
        self, context=None, client=None,
    ):
        """Check if the extended resource request extension is enabled.

        Checks for the neutron extension identified by
        ``constants.RESOURCE_REQUEST_GROUPS``, i.e. the extended format of
        the port's resource request.

        :param context: nova.context.RequestContext
        :param client: keystoneauth1.adapter.Adapter
        :returns: True if the neutron extension is available, else False
        """
        return self._has_extension(
            constants.RESOURCE_REQUEST_GROUPS, context, client,
        )

1483 

1484 def has_vnic_index_extension(self, context=None, client=None): 

1485 """Check if the 'vnic-index' extension is enabled. 

1486 

1487 This extension is provided by the VMWare NSX neutron plugin. 

1488 """ 

1489 return self._has_extension(constants.VNIC_INDEX, context, client) 

1490 

1491 def has_fip_port_details_extension(self, context=None, client=None): 

1492 """Check if the 'fip-port-details' extension is enabled. 

1493 

1494 This extension adds the 'port_details' attribute to floating IPs. 

1495 """ 

1496 return self._has_extension(constants.FIP_PORT_DETAILS, context, client) 

1497 

1498 def has_substr_port_filtering_extension(self, context=None, client=None): 

1499 """Check if the 'ip-substring-filtering' extension is enabled. 

1500 

1501 This extension adds support for filtering ports by using part of an IP 

1502 address. 

1503 """ 

1504 return self._has_extension( 

1505 constants.SUBSTR_PORT_FILTERING, context, client 

1506 ) 

1507 

1508 def has_segment_extension(self, context=None, client=None): 

1509 """Check if the neutron 'segment' extension is enabled. 

1510 

1511 This extension exposes information about L2 segments of a network. 

1512 """ 

1513 return self._has_extension( 

1514 constants.SEGMENT, context, client, 

1515 ) 

1516 

1517 def has_port_binding_extension(self, context=None, client=None): 

1518 """Check if the neutron 'binding-extended' extension is enabled. 

1519 

1520 This extensions exposes port bindings of a virtual port to external 

1521 application. 

1522 

1523 This extension allows nova to bind a port to multiple hosts at the same 

1524 time, like during live migration. 

1525 """ 

1526 return self._has_extension( 

1527 constants.PORT_BINDING_EXTENDED, context, client 

1528 ) 

1529 

    def bind_ports_to_host(self, context, instance, host,
                           vnic_types=None, port_profiles=None):
        """Attempts to bind the ports from the instance on the given host

        If the ports are already actively bound to another host, like the
        source host during live migration, then the new port bindings will
        be inactive, assuming $host is the destination host for the live
        migration.

        In the event of an error, any ports which were successfully bound to
        the host should have those host bindings removed from the ports.

        This method should not be used if "has_port_binding_extension"
        returns False.

        :param context: the user request context
        :type context: nova.context.RequestContext
        :param instance: the instance with a set of ports
        :type instance: nova.objects.Instance
        :param host: the host on which to bind the ports which
                     are attached to the instance
        :type host: str
        :param vnic_types: optional dict for the host port binding
        :type vnic_types: dict of <port_id> : <vnic_type>
        :param port_profiles: optional dict per port ID for the host port
                        binding profile.
                        note that the port binding profile is mutable
                        via the networking "Port Binding" API so callers that
                        pass in a profile should ensure they have the latest
                        version from neutron with their changes merged,
                        which can be determined using the "revision_number"
                        attribute of the port.
        :type port_profiles: dict of <port_id> : <port_profile>
        :raises: PortBindingFailed if any of the ports failed to be bound to
                 the destination host
        :returns: dict, keyed by port ID, of a new host port
                  binding dict per port that was bound
        """
        # Get the current ports off the instance. This assumes the cache is
        # current.
        network_info = instance.get_network_info()

        if not network_info:
            # The instance doesn't have any ports so there is nothing to do.
            LOG.debug('Instance does not have any ports.', instance=instance)
            return {}

        # Creating/deleting port bindings requires an admin client.
        client = get_client(context, admin=True)

        bindings_by_port_id: ty.Dict[str, ty.Any] = {}
        for vif in network_info:
            # Now bind each port to the destination host and keep track of each
            # port that is bound to the resulting binding so we can rollback in
            # the event of a failure, or return the results if everything is OK
            port_id = vif['id']
            binding = dict(host=host)
            # Prefer the caller-supplied per-port vnic_type/profile overrides;
            # otherwise reuse the values cached on the vif.
            if vnic_types is None or port_id not in vnic_types:
                binding['vnic_type'] = vif['vnic_type']
            else:
                binding['vnic_type'] = vnic_types[port_id]

            if port_profiles is None or port_id not in port_profiles:
                binding['profile'] = vif['profile']
            else:
                binding['profile'] = port_profiles[port_id]

            data = {'binding': binding}
            try:
                binding = client.create_port_binding(port_id, data)['binding']
            except neutron_client_exc.NeutronClientException:
                # Something failed, so log the error and rollback any
                # successful bindings.
                LOG.error('Binding failed for port %s and host %s.',
                          port_id, host, instance=instance, exc_info=True)
                for rollback_port_id in bindings_by_port_id:
                    try:
                        client.delete_port_binding(rollback_port_id, host)
                    except neutron_client_exc.NeutronClientException as exc:
                        # A 404 means the binding is already gone; only other
                        # errors are worth warning about. Rollback continues
                        # for the remaining ports either way.
                        if exc.status_code != 404:
                            LOG.warning('Failed to remove binding for port %s '
                                        'on host %s.', rollback_port_id, host,
                                        instance=instance)
                raise exception.PortBindingFailed(port_id=port_id)

            bindings_by_port_id[port_id] = binding

        return bindings_by_port_id

1617 

1618 def delete_port_binding(self, context, port_id, host): 

1619 """Delete the port binding for the given port ID and host 

1620 

1621 This method should not be used if "has_port_binding_extension" 

1622 returns False. 

1623 

1624 :param context: The request context for the operation. 

1625 :param port_id: The ID of the port with a binding to the host. 

1626 :param host: The host from which port bindings should be deleted. 

1627 :raises: nova.exception.PortBindingDeletionFailed if a non-404 error 

1628 response is received from neutron. 

1629 """ 

1630 client = get_client(context, admin=True) 

1631 try: 

1632 client.delete_port_binding(port_id, host) 

1633 except neutron_client_exc.NeutronClientException as exc: 

1634 # We can safely ignore 404s since we're trying to delete 

1635 # the thing that wasn't found anyway. 

1636 if exc.status_code != 404: 

1637 LOG.error( 

1638 'Unexpected error trying to delete binding for port %s ' 

1639 'and host %s.', port_id, host, exc_info=True) 

1640 raise exception.PortBindingDeletionFailed( 

1641 port_id=port_id, host=host) 

1642 

1643 def _get_vf_pci_device_profile(self, pci_dev): 

1644 """Get VF-specific fields to add to the PCI device profile. 

1645 

1646 This data can be useful, e.g. for off-path networking backends that 

1647 need to do the necessary plumbing in order to set a VF up for packet 

1648 forwarding. 

1649 """ 

1650 vf_profile: ty.Dict[str, ty.Union[str, int]] = {} 

1651 

1652 pf_mac = pci_dev.sriov_cap.get('pf_mac_address') 

1653 vf_num = pci_dev.sriov_cap.get('vf_num') 

1654 card_serial_number = pci_dev.card_serial_number 

1655 

1656 if card_serial_number is not None: 

1657 vf_profile['card_serial_number'] = card_serial_number 

1658 if pf_mac is not None: 

1659 vf_profile['pf_mac_address'] = pf_mac 

1660 if vf_num is not None: 

1661 vf_profile['vf_num'] = vf_num 

1662 

1663 # Update port binding capabilities using PCI device's network 

1664 # capabilities if they exist. 

1665 pci_net_caps = pci_dev.network_caps 

1666 if pci_net_caps: 

1667 vf_profile.update({'capabilities': pci_net_caps}) 

1668 

1669 return vf_profile 

1670 

1671 def _get_pci_device_profile(self, pci_dev): 

1672 dev_spec = self.pci_whitelist.get_devspec(pci_dev) 

1673 if dev_spec: 

1674 dev_profile = { 

1675 'pci_vendor_info': "%s:%s" 

1676 % (pci_dev.vendor_id, pci_dev.product_id), 

1677 'pci_slot': pci_dev.address, 

1678 'physical_network': dev_spec.get_tags().get( 

1679 'physical_network' 

1680 ), 

1681 } 

1682 if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_VF: 

1683 dev_profile.update( 

1684 self._get_vf_pci_device_profile(pci_dev)) 

1685 

1686 if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_PF: 

1687 # In general the MAC address information flows from the neutron 

1688 # port to the device in the backend. Except for direct-physical 

1689 # ports. In that case the MAC address flows from the physical 

1690 # device, the PF, to the neutron port. So when such a port is 

1691 # being bound to a host the port's MAC address needs to be 

1692 # updated. Nova needs to put the new MAC into the binding 

1693 # profile. 

1694 if pci_dev.mac_address: 1694 ↛ 1697line 1694 didn't jump to line 1697 because the condition on line 1694 was always true

1695 dev_profile['device_mac_address'] = pci_dev.mac_address 

1696 

1697 return dev_profile 

1698 

1699 raise exception.PciDeviceNotFound(node_id=pci_dev.compute_node_id, 

1700 address=pci_dev.address) 

1701 

    def _populate_neutron_binding_profile(self, instance, pci_request_id,
                                          port_req_body,
                                          port_arq):
        """Populate neutron binding:profile.

        Populate it with SR-IOV related information. The profile is merged
        into ``port_req_body['port']`` in place; the existing profile on the
        port body is preserved and extended.

        :param instance: the instance owning the claimed PCI devices
        :param pci_request_id: ID of the PCI request whose claimed device
            should be reflected in the profile; falsy to skip
        :param port_req_body: the port update body to mutate
        :param port_arq: optional Cyborg ARQ carrying a PCI device for the
            port; falsy to skip
        :raises PciDeviceNotFound: If a claimed PCI device for the given
            pci_request_id cannot be found on the instance.
        """
        if pci_request_id:
            pci_devices = instance.get_pci_devices(request_id=pci_request_id)
            if not pci_devices:
                # The pci_request_id likely won't mean much except for tracing
                # through the logs since it is generated per request.
                LOG.error('Unable to find PCI device using PCI request ID in '
                          'list of claimed instance PCI devices: %s. Is the '
                          '[pci]device_spec configuration correct?',
                          # Convert to a primitive list to stringify it.
                          list(instance.pci_devices), instance=instance)
                raise exception.PciDeviceNotFound(
                    _('PCI device not found for request ID %s.') %
                    pci_request_id)
            pci_dev = pci_devices.pop()
            # Deep-copy so the merged profile does not alias the cached one.
            profile = copy.deepcopy(get_binding_profile(port_req_body['port']))
            profile.update(self._get_pci_device_profile(pci_dev))
            port_req_body['port'][constants.BINDING_PROFILE] = profile

        if port_arq:
            # PCI SRIOV device according port ARQ
            profile = copy.deepcopy(get_binding_profile(port_req_body['port']))
            profile.update(cyborg.get_arq_pci_device_profile(port_arq))
            port_req_body['port'][constants.BINDING_PROFILE] = profile

1735 

    @staticmethod
    def _populate_pci_mac_address(instance, pci_request_id, port_req_body):
        """Add the updated MAC address value to the update_port request body.

        Currently this is done only for PF passthrough: for an SRIOV_PF
        device the MAC is read from the physical device and written into
        ``port_req_body['port']['mac_address']``. Any failure to resolve the
        MAC is logged and the body is left untouched.
        """
        if pci_request_id is not None:
            pci_devs = instance.get_pci_devices(request_id=pci_request_id)
            if len(pci_devs) != 1:
                # NOTE(ndipanov): We shouldn't ever get here since
                # InstancePCIRequest instances built from network requests
                # only ever index a single device, which needs to be
                # successfully claimed for this to be called as part of
                # allocate_networks method
                LOG.error("PCI request %s does not have a "
                          "unique device associated with it. Unable to "
                          "determine MAC address",
                          pci_request_id, instance=instance)
                return
            pci_dev = pci_devs[0]
            if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_PF:
                try:
                    mac = pci_utils.get_mac_by_pci_address(pci_dev.address)
                except exception.PciDeviceNotFoundById as e:
                    # Best effort: log and leave the request body unchanged.
                    LOG.error(
                        "Could not determine MAC address for %(addr)s, "
                        "error: %(e)s",
                        {"addr": pci_dev.address, "e": e}, instance=instance)
                else:
                    # Only set the MAC when the lookup succeeded.
                    port_req_body['port']['mac_address'] = mac

1766 

    def _populate_neutron_extension_values(self, context, instance,
                                           pci_request_id, port_req_body,
                                           network=None, neutron=None,
                                           bind_host_id=None,
                                           port_arq=None):
        """Populate neutron extension values for the instance.

        Sets the binding host ID, merges the SR-IOV / ARQ binding profile
        into the port body, and, when the DNS integration extension is
        enabled and the network has no dns_domain, sets the port's
        dns_name to the instance hostname.
        """
        if neutron is None:
            neutron = get_client(context)

        port_req_body['port'][constants.BINDING_HOST_ID] = bind_host_id
        self._populate_neutron_binding_profile(instance,
                                               pci_request_id,
                                               port_req_body,
                                               port_arq)

        if self.has_dns_extension(client=neutron):
            # If the DNS integration extension is enabled in Neutron, most
            # ports will get their dns_name attribute set in the port create or
            # update requests in allocate_for_instance. So we just add the
            # dns_name attribute to the payload of those requests. The
            # exception is when the port binding extension is enabled in
            # Neutron and the port is on a network that has a non-blank
            # dns_domain attribute. This case requires to be processed by
            # method _update_port_dns_name
            if (not network.get('dns_domain')):
                port_req_body['port']['dns_name'] = instance.hostname

1796 

1797 def _update_port_dns_name(self, context, instance, network, port_id, 

1798 neutron): 

1799 """Update an instance port dns_name attribute with instance.hostname. 

1800 

1801 The dns_name attribute of a port on a network with a non-blank 

1802 dns_domain attribute will be sent to the external DNS service 

1803 (Designate) if DNS integration is enabled in Neutron. This requires the 

1804 assignment of the dns_name to the port to be done with a Neutron client 

1805 using the user's context. allocate_for_instance uses a port with admin 

1806 context if the port binding extensions is enabled in Neutron. In this 

1807 case, we assign in this method the dns_name attribute to the port with 

1808 an additional update request. Only a very small fraction of ports will 

1809 require this additional update request. 

1810 """ 

1811 if self.has_dns_extension(client=neutron) and network.get( 

1812 'dns_domain'): 

1813 try: 

1814 port_req_body = {'port': {'dns_name': instance.hostname}} 

1815 neutron.update_port(port_id, port_req_body) 

1816 except neutron_client_exc.BadRequest: 

1817 LOG.warning('Neutron error: Instance hostname ' 

1818 '%(hostname)s is not a valid DNS name', 

1819 {'hostname': instance.hostname}, instance=instance) 

1820 msg = (_('Instance hostname %(hostname)s is not a valid DNS ' 

1821 'name') % {'hostname': instance.hostname}) 

1822 raise exception.InvalidInput(reason=msg) 

1823 

1824 def _reset_port_dns_name(self, network, port_id, client): 

1825 """Reset an instance port dns_name attribute to empty when using 

1826 external DNS service. 

1827 

1828 _unbind_ports uses a client with admin context to reset the dns_name if 

1829 the DNS extension is enabled and network does not have dns_domain set. 

1830 When external DNS service is enabled, we use this method to make the 

1831 request with a Neutron client using user's context, so that the DNS 

1832 record can be found under user's zone and domain. 

1833 """ 

1834 if self.has_dns_extension(client=client) and network.get( 

1835 'dns_domain'): 

1836 try: 

1837 port_req_body = {'port': {'dns_name': ''}} 

1838 client.update_port(port_id, port_req_body) 

1839 except neutron_client_exc.NeutronClientException: 

1840 LOG.exception("Failed to reset dns_name for port %s", port_id) 

1841 

1842 def _delete_ports(self, neutron, instance, ports, raise_if_fail=False): 

1843 exceptions = [] 

1844 for port in ports: 

1845 try: 

1846 neutron.delete_port(port) 

1847 except neutron_client_exc.NeutronClientException as e: 

1848 if e.status_code == 404: 

1849 LOG.warning("Port %s does not exist", port, 

1850 instance=instance) 

1851 else: 

1852 exceptions.append(e) 

1853 LOG.warning("Failed to delete port %s for instance.", 

1854 port, instance=instance, exc_info=True) 

1855 if len(exceptions) > 0 and raise_if_fail: 

1856 raise exceptions[0] 

1857 

1858 def deallocate_for_instance(self, context, instance, **kwargs): 

1859 """Deallocate all network resources related to the instance.""" 

1860 LOG.debug('deallocate_for_instance()', instance=instance) 

1861 search_opts = {'device_id': instance.uuid} 

1862 neutron = get_client(context) 

1863 data = neutron.list_ports(**search_opts) 

1864 ports = {port['id'] for port in data.get('ports', [])} 

1865 

1866 requested_networks = kwargs.get('requested_networks') or [] 

1867 # NOTE(danms): Temporary and transitional 

1868 if isinstance(requested_networks, objects.NetworkRequestList): 

1869 requested_networks = requested_networks.as_tuples() 

1870 ports_to_skip = set([port_id for nets, fips, port_id, pci_request_id, 

1871 arq_uuid, device_profile in requested_networks]) 

1872 # NOTE(boden): requested_networks only passed in when deallocating 

1873 # from a failed build / spawn call. Therefore we need to include 

1874 # preexisting ports when deallocating from a standard delete op 

1875 # in which case requested_networks is not provided. 

1876 ports_to_skip |= set(self._get_preexisting_port_ids(instance)) 

1877 ports = set(ports) - ports_to_skip 

1878 

1879 # Reset device_id and device_owner for the ports that are skipped 

1880 self._unbind_ports(context, ports_to_skip, neutron) 

1881 # Delete the rest of the ports 

1882 self._delete_ports(neutron, instance, ports, raise_if_fail=True) 

1883 

1884 # deallocate vifs (mac addresses) 

1885 objects.VirtualInterface.delete_by_instance_uuid( 

1886 context, instance.uuid) 

1887 

1888 # NOTE(arosen): This clears out the network_cache only if the instance 

1889 # hasn't already been deleted. This is needed when an instance fails to 

1890 # launch and is rescheduled onto another compute node. If the instance 

1891 # has already been deleted this call does nothing. 

1892 update_instance_cache_with_nw_info(self, context, instance, 

1893 network_model.NetworkInfo([])) 

1894 

    def deallocate_port_for_instance(self, context, instance, port_id):
        """Remove a specified port from the instance.

        :param context: the request context
        :param instance: the instance object the port is detached from
        :param port_id: the UUID of the port being detached
        :return: A NetworkInfo, port_allocation tuple where the
                 port_allocation is a dict which contains the resource
                 allocation of the port per resource provider uuid. E.g.:
                 {
                     rp_uuid: {
                         "resources": {
                             "NET_BW_EGR_KILOBIT_PER_SEC": 10000,
                             "NET_BW_IGR_KILOBIT_PER_SEC": 20000,
                         }
                     }
                 }
                 Note that right now this dict only contains a single key as a
                 neutron port only allocates from a single resource provider.
        """
        # We need to use an admin client as the port.resource_request is admin
        # only
        neutron_admin = get_client(context, admin=True)
        neutron = get_client(context)
        port_allocation: ty.Dict = {}
        try:
            # NOTE(gibi): we need to read the port resource information from
            # neutron here as we might delete the port below
            port = neutron_admin.show_port(port_id)['port']
        except exception.PortNotFound:
            LOG.debug('Unable to determine port %s resource allocation '
                      'information as the port no longer exists.', port_id)
            port = None

        # Preexisting (user-created) ports are only unbound; ports created
        # by nova are deleted outright.
        preexisting_ports = self._get_preexisting_port_ids(instance)
        if port_id in preexisting_ports:
            self._unbind_ports(context, [port_id], neutron)
        else:
            self._delete_ports(neutron, instance, [port_id],
                               raise_if_fail=True)

        # Delete the VirtualInterface for the given port_id.
        vif = objects.VirtualInterface.get_by_uuid(context, port_id)
        if vif:
            self._delete_nic_metadata(instance, vif)
            vif.destroy()
        else:
            LOG.debug('VirtualInterface not found for port: %s',
                      port_id, instance=instance)

        if port:
            # if there is resource associated to this port then that needs to
            # be deallocated so lets return info about such allocation
            resource_request = port.get(constants.RESOURCE_REQUEST) or {}
            profile = get_binding_profile(port)
            if self.has_extended_resource_request_extension(context, neutron):
                # new format
                groups = resource_request.get(constants.REQUEST_GROUPS)
                if groups:
                    allocated_rps = profile.get(constants.ALLOCATION)
                    for group in groups:
                        allocated_rp = allocated_rps[group['id']]
                        port_allocation[allocated_rp] = {
                            "resources": group.get("resources", {})
                        }
            else:
                # legacy format
                allocated_rp = profile.get(constants.ALLOCATION)
                if resource_request and allocated_rp:
                    port_allocation = {
                        allocated_rp: {
                            "resources": resource_request.get("resources", {})
                        }
                    }
        else:
            # Check the info_cache. If the port is still in the info_cache and
            # in that cache there is allocation in the profile then we suspect
            # that the port is disappeared without deallocating the resources.
            for vif in instance.get_network_info():
                if vif['id'] == port_id:
                    profile = vif.get('profile') or {}
                    rp_uuid = profile.get(constants.ALLOCATION)
                    if rp_uuid:
                        LOG.warning(
                            'Port %s disappeared during deallocate but it had '
                            'resource allocation on resource provider %s. '
                            'Resource allocation for this port may be '
                            'leaked.', port_id, rp_uuid, instance=instance)
                    break

        return self.get_instance_nw_info(context, instance), port_allocation

1986 

1987 def _delete_nic_metadata(self, instance, vif): 

1988 if not instance.device_metadata: 

1989 # nothing to delete 

1990 return 

1991 

1992 for device in instance.device_metadata.devices: 1992 ↛ exitline 1992 didn't return from function '_delete_nic_metadata' because the loop on line 1992 didn't complete

1993 if (isinstance(device, objects.NetworkInterfaceMetadata) and 1993 ↛ 1992line 1993 didn't jump to line 1992 because the condition on line 1993 was always true

1994 device.mac == vif.address): 

1995 instance.device_metadata.devices.remove(device) 

1996 instance.save() 

1997 break 

1998 

1999 def list_ports(self, context, **search_opts): 

2000 """List ports for the client based on search options.""" 

2001 return get_client(context).list_ports(**search_opts) 

2002 

2003 def show_port(self, context, port_id): 

2004 """Return the port for the client given the port id. 

2005 

2006 :param context: Request context. 

2007 :param port_id: The id of port to be queried. 

2008 :returns: A dict containing port data keyed by 'port', e.g. 

2009 

2010 :: 

2011 

2012 {'port': {'port_id': 'abcd', 

2013 'fixed_ip_address': '1.2.3.4'}} 

2014 """ 

2015 return dict(port=self._show_port(context, port_id)) 

2016 

2017 def _show_port(self, context, port_id, neutron_client=None, fields=None): 

2018 """Return the port for the client given the port id. 

2019 

2020 :param context: Request context. 

2021 :param port_id: The id of port to be queried. 

2022 :param neutron_client: A neutron client. 

2023 :param fields: The condition fields to query port data. 

2024 :returns: A dict of port data. 

2025 e.g. {'port_id': 'abcd', 'fixed_ip_address': '1.2.3.4'} 

2026 """ 

2027 if not neutron_client: 

2028 neutron_client = get_client(context) 

2029 try: 

2030 if fields: 

2031 result = neutron_client.show_port(port_id, fields=fields) 

2032 else: 

2033 result = neutron_client.show_port(port_id) 

2034 return result.get('port') 

2035 except neutron_client_exc.PortNotFoundClient: 

2036 raise exception.PortNotFound(port_id=port_id) 

2037 except neutron_client_exc.Unauthorized: 

2038 raise exception.Forbidden() 

2039 except neutron_client_exc.NeutronClientException as exc: 

2040 msg = (_("Failed to access port %(port_id)s: %(reason)s") % 

2041 {'port_id': port_id, 'reason': exc}) 

2042 raise exception.NovaException(message=msg) 

2043 

    def get_instance_nw_info(self, context, instance, **kwargs):
        """Returns all network info related to an instance.

        Takes the per-instance ``refresh_cache-<uuid>`` lock so concurrent
        refreshes cannot interleave, rebuilds the network info and writes it
        back to the instance's info cache before returning it.
        """
        with lockutils.lock('refresh_cache-%s' % instance.uuid):
            result = self._get_instance_nw_info(context, instance, **kwargs)
            update_instance_cache_with_nw_info(self, context, instance,
                                               nw_info=result)
        return result

2051 

    def _get_instance_nw_info(self, context, instance, networks=None,
                              port_ids=None, admin_client=None,
                              preexisting_port_ids=None,
                              refresh_vif_id=None, force_refresh=False,
                              **kwargs):
        """Rebuild and return the instance's NetworkInfo model.

        Refreshes the instance info cache from the database first, then
        builds the network info model and hydrates it.
        """
        # NOTE(danms): This is an inner method intended to be called
        # by other code that updates instance nwinfo. It *must* be
        # called with the refresh_cache-%(instance_uuid) lock held!
        if force_refresh:
            LOG.debug('Forcefully refreshing network info cache for instance',
                      instance=instance)
        elif refresh_vif_id:
            LOG.debug('Refreshing network info cache for port %s',
                      refresh_vif_id, instance=instance)
        else:
            LOG.debug('Building network info cache for instance',
                      instance=instance)
        # Ensure that we have an up to date copy of the instance info cache.
        # Otherwise multiple requests could collide and cause cache
        # corruption.
        compute_utils.refresh_info_cache_for_instance(context, instance)
        nw_info = self._build_network_info_model(context, instance, networks,
                                                 port_ids, admin_client,
                                                 preexisting_port_ids,
                                                 refresh_vif_id,
                                                 force_refresh=force_refresh)
        return network_model.NetworkInfo.hydrate(nw_info)

2079 

    def _gather_port_ids_and_networks(self, context, instance, networks=None,
                                      port_ids=None, neutron=None):
        """Return an instance's complete list of port_ids and networks.

        The results are based on the instance info_cache in the nova db, not
        the instance's current list of ports in neutron.

        :param context: the request context
        :param instance: the instance whose cached interfaces are merged in
        :param networks: list of network dicts, or None; must be None
            exactly when port_ids is None
        :param port_ids: list of port IDs, or None
        :param neutron: optional neutron client used when networks must be
            looked up
        :raises NovaException: if only one of networks/port_ids is provided
        :returns: (networks, port_ids) tuple with cached interfaces merged
            in without duplicates
        """

        # Both arguments must be provided together or both omitted.
        if ((networks is None and port_ids is not None) or
                (port_ids is None and networks is not None)):
            message = _("This method needs to be called with either "
                        "networks=None and port_ids=None or port_ids and "
                        "networks as not none.")
            raise exception.NovaException(message=message)

        ifaces = instance.get_network_info()
        # This code path is only done when refreshing the network_cache
        if port_ids is None:
            port_ids = [iface['id'] for iface in ifaces]
            net_ids = [iface['network']['id'] for iface in ifaces]

        if networks is None:
            networks = self._get_available_networks(context,
                                                    instance.project_id,
                                                    net_ids, neutron)
        # an interface was added/removed from instance.
        else:

            # Prepare the network ids list for validation purposes
            networks_ids = [network['id'] for network in networks]

            # Validate that interface networks doesn't exist in networks.
            # Though this issue can and should be solved in methods
            # that prepare the networks list, this method should have this
            # ignore-duplicate-networks/port-ids mechanism to reduce the
            # probability of failing to boot the VM.
            networks = networks + [
                {'id': iface['network']['id'],
                 'name': iface['network']['label'],
                 'tenant_id': iface['network']['meta']['tenant_id']}
                for iface in ifaces
                if _is_not_duplicate(iface['network']['id'],
                                     networks_ids,
                                     "networks",
                                     instance)]

            # Include existing interfaces so they are not removed from the db.
            # Validate that the interface id is not in the port_ids
            port_ids = [iface['id'] for iface in ifaces
                        if _is_not_duplicate(iface['id'],
                                             port_ids,
                                             "port_ids",
                                             instance)] + port_ids

        return networks, port_ids

2135 

    @refresh_cache
    def add_fixed_ip_to_instance(self, context, instance, network_id):
        """Add a fixed IP to the instance from specified network.

        Tries each of the instance's ports on the network against each of
        the network's subnets until one port update succeeds; update
        failures are logged and the next subnet/port is tried.

        :param context: the request context
        :param instance: the instance receiving the additional fixed IP
        :param network_id: the network whose subnets provide the IP
        :raises NetworkNotFoundForInstance: if the network has no subnets or
            no port update succeeded
        :returns: refreshed network info for the instance
        """
        neutron = get_client(context)
        search_opts = {'network_id': network_id}
        data = neutron.list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        if not ipam_subnets:
            raise exception.NetworkNotFoundForInstance(
                instance_id=instance.uuid)

        zone = 'compute:%s' % instance.availability_zone
        search_opts = {'device_id': instance.uuid,
                       'device_owner': zone,
                       'network_id': network_id}
        data = neutron.list_ports(**search_opts)
        ports = data['ports']
        for p in ports:
            for subnet in ipam_subnets:
                fixed_ips = p['fixed_ips']
                # Request an address from this subnet by appending an entry
                # with only the subnet_id; neutron picks the IP.
                fixed_ips.append({'subnet_id': subnet['id']})
                port_req_body = {'port': {'fixed_ips': fixed_ips}}
                try:
                    neutron.update_port(p['id'], port_req_body)
                    return self._get_instance_nw_info(context, instance)
                except Exception as ex:
                    msg = ("Unable to update port %(portid)s on subnet "
                           "%(subnet_id)s with failure: %(exception)s")
                    LOG.debug(msg, {'portid': p['id'],
                                    'subnet_id': subnet['id'],
                                    'exception': ex}, instance=instance)

        raise exception.NetworkNotFoundForInstance(
            instance_id=instance.uuid)

2170 

    @refresh_cache
    def remove_fixed_ip_from_instance(self, context, instance, address):
        """Remove a fixed IP from the instance.

        Finds the instance port holding ``address``, rewrites its fixed_ips
        without that address, and returns the refreshed network info. A port
        update failure is logged but not treated as fatal.

        :param context: the request context
        :param instance: the instance losing the fixed IP
        :param address: the IP address to remove
        :raises FixedIpNotFoundForInstance: if no matching port is found
        :returns: refreshed network info for the instance
        """
        neutron = get_client(context)
        zone = 'compute:%s' % instance.availability_zone
        search_opts = {'device_id': instance.uuid,
                       'device_owner': zone,
                       'fixed_ips': 'ip_address=%s' % address}
        data = neutron.list_ports(**search_opts)
        ports = data['ports']
        for p in ports:
            fixed_ips = p['fixed_ips']
            new_fixed_ips = []
            # Keep every fixed IP except the one being removed.
            for fixed_ip in fixed_ips:
                if fixed_ip['ip_address'] != address:
                    new_fixed_ips.append(fixed_ip)
            port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
            try:
                neutron.update_port(p['id'], port_req_body)
            except Exception as ex:
                msg = ("Unable to update port %(portid)s with"
                       " failure: %(exception)s")
                LOG.debug(msg, {'portid': p['id'], 'exception': ex},
                          instance=instance)
            return self._get_instance_nw_info(context, instance)

        raise exception.FixedIpNotFoundForInstance(
            instance_uuid=instance.uuid, ip=address)

2199 

def _get_physnet_tunneled_info(self, context, neutron, net_id):
    """Retrieve detailed network info.

    :param context: The request context.
    :param neutron: The neutron client object.
    :param net_id: The ID of the network to retrieve information for.

    :return: A tuple containing the physnet name, if defined, and the
        tunneled status of the network. If the network uses multiple
        segments, the first segment that defines a physnet value will be
        used for the physnet name.
    :raises: NovaException if the network is multi-segment but no
        segment provides a physical_network.
    """
    if self.has_multi_provider_extension(client=neutron):
        network = neutron.show_network(net_id,
                                       fields='segments').get('network')
        segments = network.get('segments', {})
        for net in segments:
            # NOTE(vladikr): In general, "multi-segments" network is a
            # combination of L2 segments. The current implementation
            # contains a vxlan and vlan(s) segments, where only a vlan
            # network will have a physical_network specified, but may
            # change in the future. The purpose of this method
            # is to find a first segment that provides a physical network.
            # TODO(vladikr): Additional work will be required to handle the
            # case of multiple vlan segments associated with different
            # physical networks.
            physnet_name = net.get('provider:physical_network')
            if physnet_name:
                # A segment with a physnet is by definition not tunneled.
                return physnet_name, False

        # Raising here as at least one segment should
        # have a physical network provided.
        if segments:
            msg = (_("None of the segments of network %s provides a "
                     "physical_network") % net_id)
            raise exception.NovaException(message=msg)

    # Single-segment (or no multi-provider extension): read the
    # provider attributes straight off the network itself.
    net = neutron.show_network(
        net_id, fields=['provider:physical_network',
                        'provider:network_type']).get('network')
    return (net.get('provider:physical_network'),
            net.get('provider:network_type') in constants.L3_NETWORK_TYPES)

2242 

@staticmethod
def _get_trusted_mode_from_port(port):
    """Return the trusted mode requested by a port's binding profile.

    Returns None when the binding profile carries no 'trusted' entry
    at all; otherwise the value is coerced to a boolean.
    """
    raw = get_binding_profile(port).get('trusted')
    if raw is None:
        return None
    # Users may put things like '1' or 'yes' in the binding profile;
    # normalize such values to a real boolean.
    return strutils.bool_from_string(raw)

2255 

@staticmethod
def _is_remote_managed(vnic_type):
    """Determine if the port is remote_managed or not by VNIC type.

    :param str vnic_type: The VNIC type to assess.
    :return: A boolean indicator whether the NIC is remote managed or not.
    :rtype: bool
    """
    # Pure string comparison against the single remote-managed VNIC
    # type constant; no neutron round trip is made here.
    return vnic_type == network_model.VNIC_TYPE_REMOTE_MANAGED

2265 

def is_remote_managed_port(self, context, port_id):
    """Determine if a port has a REMOTE_MANAGED VNIC type.

    :param context: The request context
    :param port_id: The id of the Neutron port
    :returns: True if the port's VNIC type is remote-managed.
    """
    port_info = self.show_port(context, port_id)['port']
    # Ports without an explicit binding default to the normal VNIC type.
    vnic_type = port_info.get(
        'binding:vnic_type', network_model.VNIC_TYPE_NORMAL)
    return self._is_remote_managed(vnic_type)

2276 

# NOTE(sean-k-mooney): we might want to have this return a
# nova.network.model.VIF object instead in the future.
def _get_port_vnic_info(self, context, neutron, port_id):
    """Retrieve port vNIC info

    :param context: The request context
    :param neutron: The Neutron client
    :param port_id: The id of port to be queried

    :return: A tuple of vNIC type, trusted status, network ID, resource
        request of the port if any and port numa affinity policy,
        and device_profile.
        Trusted status only affects SR-IOV ports and will always be
        None for other port types. If no port numa policy is
        requested by a port, None will be returned.
    """
    # Only ask neutron for the fields this method actually consumes.
    fields = ['binding:vnic_type', constants.BINDING_PROFILE,
              'network_id', constants.RESOURCE_REQUEST,
              constants.NUMA_POLICY, 'device_profile']
    port = self._show_port(
        context, port_id, neutron_client=neutron, fields=fields)
    network_id = port.get('network_id')
    trusted = None
    vnic_type = port.get('binding:vnic_type',
                         network_model.VNIC_TYPE_NORMAL)
    # Trusted mode is only meaningful for SR-IOV VNIC types.
    if vnic_type in network_model.VNIC_TYPES_SRIOV:
        trusted = self._get_trusted_mode_from_port(port)

    # NOTE(gibi): Get the port resource_request which may or may not be
    # set depending on neutron configuration, e.g. if QoS rules are
    # applied to the port/network and the port-resource-request API
    # extension is enabled.
    resource_request = port.get(constants.RESOURCE_REQUEST, None)
    numa_policy = port.get(constants.NUMA_POLICY, None)
    device_profile = port.get("device_profile", None)
    return (vnic_type, trusted, network_id, resource_request,
            numa_policy, device_profile)

2314 

def support_create_with_resource_request(self, context):
    """Report whether boot with port resource requests is supported.

    Returns False when neutron is configured with the extended
    resource request extension, which is not currently supported at
    server create time; True otherwise.

    This function is only here temporarily to help mocking this check
    in the functional test environment.
    """
    has_extended = self.has_extended_resource_request_extension(context)
    return not has_extended

2323 

def create_resource_requests(
        self, context, requested_networks, pci_requests=None,
        affinity_policy=None):
    """Retrieve all information for the networks passed at the time of
    creating the server.

    :param context: The request context.
    :param requested_networks: The networks requested for the server.
    :type requested_networks: nova.objects.NetworkRequestList
    :param pci_requests: The list of PCI requests to which additional PCI
        requests created here will be added.
    :type pci_requests: nova.objects.InstancePCIRequests
    :param affinity_policy: requested pci numa affinity policy
    :type affinity_policy: nova.objects.fields.PCINUMAAffinityPolicy

    :returns: A three tuple with an instance of ``objects.NetworkMetadata``
        for use by the scheduler or None, a list of RequestGroup
        objects representing the resource needs of each requested port and
        a RequestLevelParam object that contains global scheduling
        instructions not specific to any of the RequestGroups
    """
    # Nothing to compute when no networks were requested or allocation
    # was explicitly suppressed.
    if not requested_networks or requested_networks.no_allocate:
        return None, [], None

    physnets = set()
    tunneled = False

    neutron = get_client(context, admin=True)
    has_extended_resource_request_extension = (
        self.has_extended_resource_request_extension(context, neutron))
    resource_requests = []
    request_level_params = objects.RequestLevelParams()

    for request_net in requested_networks:
        # Per-request state; only filled in for pre-existing ports or
        # explicit (non-auto-allocated) networks below.
        physnet = None
        trusted = None
        tunneled_ = False
        vnic_type = network_model.VNIC_TYPE_NORMAL
        pci_request_id = None
        requester_id = None
        port_numa_policy = None

        if request_net.port_id:
            # InstancePCIRequest.requester_id is semantically linked
            # to a port with a resource_request.
            requester_id = request_net.port_id
            (vnic_type, trusted, network_id, resource_request,
             port_numa_policy, device_profile) = self._get_port_vnic_info(
                context, neutron, request_net.port_id)
            physnet, tunneled_ = self._get_physnet_tunneled_info(
                context, neutron, network_id)

            if vnic_type in network_model.VNIC_TYPES_ACCELERATOR:
                # get request groups from cyborg profile
                if not device_profile:
                    err = ('No device profile for port %s.'
                           % (request_net.port_id))
                    raise exception.DeviceProfileError(
                        name=device_profile, msg=err)
                cyclient = cyborg.get_client(context)
                dp_groups = cyclient.get_device_profile_groups(
                    device_profile)
                dev_num = cyborg.get_device_amount_of_dp_groups(dp_groups)
                if dev_num > 1:
                    err_msg = 'request multiple devices for single port.'
                    raise exception.DeviceProfileError(name=device_profile,
                                                       msg=err_msg)

                dp_request_groups = (cyclient.get_device_request_groups(
                    dp_groups, owner=request_net.port_id))
                LOG.debug("device_profile request group(ARQ): %s",
                          dp_request_groups)
                # keep device_profile to avoid get vnic info again
                request_net.device_profile = device_profile
                resource_requests.extend(dp_request_groups)

            if resource_request:
                if has_extended_resource_request_extension:
                    # need to handle the new resource request format
                    # NOTE(gibi): explicitly orphan the RequestGroup by
                    # setting context=None as we never intended to save it
                    # to the DB.
                    resource_requests.extend(
                        objects.RequestGroup.from_extended_port_request(
                            context=None,
                            port_resource_request=resource_request))
                    request_level_params.extend_with(
                        objects.RequestLevelParams.from_port_request(
                            port_resource_request=resource_request))
                else:
                    # keep supporting the old format of the
                    # resource_request
                    # NOTE(gibi): explicitly orphan the RequestGroup by
                    # setting context=None as we never intended to save it
                    # to the DB.
                    resource_requests.append(
                        objects.RequestGroup.from_port_request(
                            context=None,
                            port_uuid=request_net.port_id,
                            port_resource_request=resource_request))

        elif request_net.network_id and not request_net.auto_allocate:
            network_id = request_net.network_id
            physnet, tunneled_ = self._get_physnet_tunneled_info(
                context, neutron, network_id)

        # All tunneled traffic must use the same logical NIC so we just
        # need to know if there is one or more tunneled networks present.
        tunneled = tunneled or tunneled_

        # ...conversely, there can be multiple physnets, which will
        # generally be mapped to different NICs, and some requested
        # networks may use the same physnet. As a result, we need to know
        # the *set* of physnets from every network requested
        if physnet:
            physnets.add(physnet)

        if vnic_type in network_model.VNIC_TYPES_SRIOV:
            # TODO(moshele): To differentiate between the SR-IOV legacy
            # and SR-IOV ovs hardware offload we will leverage the nic
            # feature based scheduling in nova. This mean we will need
            # libvirt to expose the nic feature. At the moment
            # there is a limitation that deployers cannot use both
            # SR-IOV modes (legacy and ovs) in the same deployment.
            spec = {
                pci_request.PCI_NET_TAG: physnet,
                # Convert the value to string since tags are compared as
                # string values case-insensitively.
                pci_request.PCI_REMOTE_MANAGED_TAG:
                    str(self._is_remote_managed(vnic_type)),
            }
            dev_type = pci_request.DEVICE_TYPE_FOR_VNIC_TYPE.get(vnic_type)
            if dev_type:
                spec[pci_request.PCI_DEVICE_TYPE_TAG] = dev_type
            if trusted is not None:
                # We specifically have requested device on a pool
                # with a tag trusted set to true or false. We
                # convert the value to string since tags are
                # compared in that way.
                spec[pci_request.PCI_TRUSTED_TAG] = str(trusted)
            request = objects.InstancePCIRequest(
                count=1,
                spec=[spec],
                request_id=uuidutils.generate_uuid(),
                requester_id=requester_id)
            # NOTE(sean-k-mooney): port NUMA policies take precedence
            # over image and flavor policies.
            numa_policy = port_numa_policy or affinity_policy
            if numa_policy:
                request.numa_policy = numa_policy
            pci_requests.requests.append(request)
            pci_request_id = request.request_id

        # Add pci_request_id into the requested network
        request_net.pci_request_id = pci_request_id

    return (
        objects.NetworkMetadata(physnets=physnets, tunneled=tunneled),
        resource_requests,
        request_level_params
    )

2485 

def _can_auto_allocate_network(self, context, neutron):
    """Helper method to determine if we can auto-allocate networks

    :param context: nova request context
    :param neutron: neutron client
    :returns: True if it's possible to auto-allocate networks, False
        otherwise.
    """
    # The dry-run validation raises a 409 (Conflict) when the
    # deployment is not ready for auto-allocation.
    try:
        neutron.validate_auto_allocated_topology_requirements(
            context.project_id)
    except neutron_client_exc.Conflict as ex:
        LOG.debug('Unable to auto-allocate networks. %s',
                  str(ex))
        return False
    LOG.debug('Network auto-allocation is available for project '
              '%s', context.project_id)
    return True

2505 

def _auto_allocate_network(self, instance, neutron):
    """Automatically allocates a network for the given project.

    :param instance: create the network for the project that owns this
        instance
    :param neutron: neutron client
    :returns: Details of the network that was created.
    :raises: nova.exception.UnableToAutoAllocateNetwork
    :raises: nova.exception.NetworkNotFound
    """
    project_id = instance.project_id
    LOG.debug('Automatically allocating a network for project %s.',
              project_id, instance=instance)
    # A Conflict from neutron means the project/deployment is not in a
    # state where the topology can be auto-allocated.
    try:
        topology = neutron.get_auto_allocated_topology(
            project_id)['auto_allocated_topology']
    except neutron_client_exc.Conflict:
        raise exception.UnableToAutoAllocateNetwork(project_id=project_id)

    try:
        network = neutron.show_network(topology['id'])['network']
    except neutron_client_exc.NetworkNotFoundClient:
        # This shouldn't happen since we just created the network, but
        # handle it anyway.
        LOG.error('Automatically allocated network %(network_id)s '
                  'was not found.', {'network_id': topology['id']},
                  instance=instance)
        raise exception.UnableToAutoAllocateNetwork(project_id=project_id)

    LOG.debug('Automatically allocated network: %s', network,
              instance=instance)
    return network

2538 

def _ports_needed_per_instance(self, context, neutron, requested_networks):
    """Validate the requested networks/ports and count ports to create.

    :param context: The request context.
    :param neutron: The neutron client.
    :param requested_networks: NetworkRequestList (or legacy list of
        tuples) describing the networks/ports requested per instance.
    :returns: The number of ports that must be created per instance;
        0 when every request names a pre-existing port.
    :raises: NetworkAmbiguous, UnableToAutoAllocateNetwork, PortInUse,
        PortRequiresFixedIP, FixedIpAlreadyInUse, NetworkRequiresSubnet,
        NetworkNotFound
    """
    # TODO(danms): Remove me when all callers pass an object
    if requested_networks and isinstance(requested_networks[0], tuple):
        requested_networks = objects.NetworkRequestList.from_tuples(
            requested_networks)

    ports_needed_per_instance = 0
    if (requested_networks is None or len(requested_networks) == 0 or
            requested_networks.auto_allocate):
        nets = self._get_available_networks(context, context.project_id,
                                            neutron=neutron)
        if len(nets) > 1:
            # Attaching to more than one network by default doesn't
            # make sense, as the order will be arbitrary and the guest OS
            # won't know which to configure
            msg = _("Multiple possible networks found, use a Network "
                    "ID to be more specific.")
            raise exception.NetworkAmbiguous(msg)

        if not nets and (
                requested_networks and requested_networks.auto_allocate):
            # If there are no networks available to this project and we
            # were asked to auto-allocate a network, check to see that we
            # can do that first.
            LOG.debug('No networks are available for project %s; checking '
                      'to see if we can automatically allocate a network.',
                      context.project_id)
            if not self._can_auto_allocate_network(context, neutron):
                raise exception.UnableToAutoAllocateNetwork(
                    project_id=context.project_id)

        ports_needed_per_instance = 1
    else:
        net_ids_requested = []
        for request in requested_networks:
            if request.port_id:
                port = self._show_port(context, request.port_id,
                                       neutron_client=neutron)
                if port.get('device_id'):
                    raise exception.PortInUse(port_id=request.port_id)

                deferred_ip = port.get('ip_allocation') == 'deferred'
                ipless_port = port.get('ip_allocation') == 'none'
                # NOTE(carl_baldwin) A deferred IP port doesn't have an
                # address here. If it fails to get one later when nova
                # updates it with host info, Neutron will error which
                # raises an exception.
                # NOTE(sbauza): We don't need to validate the
                # 'connectivity' attribute of the port's
                # 'binding:vif_details' to ensure it's 'l2', as Neutron
                # already verifies it.
                if (
                    not (deferred_ip or ipless_port) and
                    not port.get('fixed_ips')
                ):
                    raise exception.PortRequiresFixedIP(
                        port_id=request.port_id)

                request.network_id = port['network_id']
            else:
                ports_needed_per_instance += 1
                net_ids_requested.append(request.network_id)

                # NOTE(jecarey) There is currently a race condition.
                # That is, if you have more than one request for a specific
                # fixed IP at the same time then only one will be allocated
                # the ip. The fixed IP will be allocated to only one of the
                # instances that will run. The second instance will fail on
                # spawn. That instance will go into error state.
                # TODO(jecarey) Need to address this race condition once we
                # have the ability to update mac addresses in Neutron.
                if request.address:
                    # TODO(jecarey) Need to look at consolidating list_port
                    # calls once able to OR filters.
                    search_opts = {'network_id': request.network_id,
                                   'fixed_ips': 'ip_address=%s' % (
                                       request.address),
                                   'fields': 'device_id'}
                    existing_ports = neutron.list_ports(
                        **search_opts)['ports']
                    if existing_ports:
                        i_uuid = existing_ports[0]['device_id']
                        raise exception.FixedIpAlreadyInUse(
                            address=request.address,
                            instance_uuid=i_uuid)

        # Now check to see if all requested networks exist
        if net_ids_requested:
            nets = self._get_available_networks(
                context, context.project_id, net_ids_requested,
                neutron=neutron)

            for net in nets:
                if not net.get('subnets'):
                    raise exception.NetworkRequiresSubnet(
                        network_uuid=net['id'])

            if len(nets) != len(net_ids_requested):
                requested_netid_set = set(net_ids_requested)
                returned_netid_set = set([net['id'] for net in nets])
                lostid_set = requested_netid_set - returned_netid_set
                if lostid_set:
                    # Report every missing network ID, comma-separated.
                    # (Replaces the obsolete ``a and b or c`` chain with
                    # a str.join producing identical output.)
                    id_str = ', '.join(lostid_set)
                    raise exception.NetworkNotFound(network_id=id_str)
    return ports_needed_per_instance

2647 

def get_requested_resource_for_instance(
    self,
    context: nova_context.RequestContext,
    instance_uuid: str
) -> ty.Tuple[
        ty.List['objects.RequestGroup'], 'objects.RequestLevelParams']:
    """Collect resource requests from the ports associated to the instance

    :param context: nova request context
    :param instance_uuid: The UUID of the instance
    :return: A two tuple with a list of RequestGroup objects and a
        RequestLevelParams object.
    """

    # NOTE(gibi): We need to use an admin client as otherwise a non admin
    # initiated resize causes that neutron does not fill the
    # resource_request field of the port and this will lead to resource
    # allocation issues. See bug 1849695
    neutron = get_client(context, admin=True)
    # get the ports associated to this instance
    data = neutron.list_ports(
        device_id=instance_uuid, fields=['id', constants.RESOURCE_REQUEST])
    resource_requests = []
    request_level_params = objects.RequestLevelParams()
    extended_rr = self.has_extended_resource_request_extension(
        context, neutron)

    for port in data.get('ports', []):
        resource_request = port.get(constants.RESOURCE_REQUEST)
        if extended_rr and resource_request:
            # New (extended) format: the port may carry multiple request
            # groups plus request-level parameters.
            resource_requests.extend(
                objects.RequestGroup.from_extended_port_request(
                    context=None,
                    port_resource_request=port['resource_request']))
            request_level_params.extend_with(
                objects.RequestLevelParams.from_port_request(
                    port_resource_request=resource_request))
        else:
            # keep supporting the old format of the resource_request
            if resource_request:
                # NOTE(gibi): explicitly orphan the RequestGroup by setting
                # context=None as we never intended to save it to the DB.
                resource_requests.append(
                    objects.RequestGroup.from_port_request(
                        context=None, port_uuid=port['id'],
                        port_resource_request=port['resource_request']))

    return resource_requests, request_level_params

2696 

def validate_networks(self, context, requested_networks, num_instances):
    """Validate that the tenant can use the requested networks.

    Return the number of instances than can be successfully allocated
    with the requested network configuration.

    :param context: The request context.
    :param requested_networks: The networks requested for the server(s).
    :param num_instances: The number of instances the caller wants to
        boot.
    :returns: num_instances if quota allows, otherwise the (smaller)
        number of instances whose ports fit within the remaining quota.
    :raises: PortLimitExceeded if the project is already over its port
        quota.
    """
    LOG.debug('validate_networks() for %s', requested_networks)

    neutron = get_client(context)
    ports_needed_per_instance = self._ports_needed_per_instance(
        context, neutron, requested_networks)

    # Note(PhilD): Ideally Nova would create all required ports as part of
    # network validation, but port creation requires some details
    # from the hypervisor. So we just check the quota and return
    # how many of the requested number of instances can be created
    if ports_needed_per_instance:
        quotas = neutron.show_quota(context.project_id)['quota']
        if quotas.get('port', -1) == -1:
            # Unlimited Port Quota
            return num_instances

        # We only need the port count so only ask for ids back.
        params = dict(tenant_id=context.project_id, fields=['id'])
        ports = neutron.list_ports(**params)['ports']
        free_ports = quotas.get('port') - len(ports)
        if free_ports < 0:
            msg = (_("The number of defined ports: %(ports)d "
                     "is over the limit: %(quota)d") %
                   {'ports': len(ports),
                    'quota': quotas.get('port')})
            raise exception.PortLimitExceeded(msg)
        ports_needed = ports_needed_per_instance * num_instances
        if free_ports >= ports_needed:
            return num_instances
        else:
            # Partial capacity: report how many instances actually fit.
            return free_ports // ports_needed_per_instance
    return num_instances

2735 

def _get_instance_uuids_by_ip(self, context, address):
    """Retrieve instance uuids associated with the given IP address.

    :returns: A list of dicts containing the uuids keyed by
        'instance_uuid' e.g. [{'instance_uuid': uuid}, ...]
    """
    filters = {"fixed_ips": 'ip_address=%s' % address}
    listing = get_client(context).list_ports(**filters)
    # Ports with an empty device_id are not attached to any instance.
    return [
        {'instance_uuid': port['device_id']}
        for port in listing.get('ports', [])
        if port['device_id']
    ]

2747 

2748 def _get_port_id_by_fixed_address(self, client, 

2749 instance, address): 

2750 """Return port_id from a fixed address.""" 

2751 zone = 'compute:%s' % instance.availability_zone 

2752 search_opts = {'device_id': instance.uuid, 

2753 'device_owner': zone} 

2754 data = client.list_ports(**search_opts) 

2755 ports = data['ports'] 

2756 port_id = None 

2757 for p in ports: 

2758 for ip in p['fixed_ips']: 

2759 if ip['ip_address'] == address: 

2760 port_id = p['id'] 

2761 break 

2762 if not port_id: 

2763 raise exception.FixedIpNotFoundForAddress(address=address) 

2764 return port_id 

2765 

@refresh_cache
def associate_floating_ip(self, context, instance,
                          floating_address, fixed_address,
                          affect_auto_assigned=False):
    """Associate a floating IP with a fixed IP.

    :param context: The request context.
    :param instance: The instance that owns the fixed address.
    :param floating_address: The floating IP address to associate.
    :param fixed_address: The instance fixed IP to bind it to.
    :param affect_auto_assigned: Ignored (see note below).
    :raises: FloatingIpAssociateFailed on a neutron conflict.
    """

    # Note(amotoki): 'affect_auto_assigned' is not respected
    # since it is not used anywhere in nova code and I could not
    # find why this parameter exists.

    client = get_client(context)
    port_id = self._get_port_id_by_fixed_address(client, instance,
                                                 fixed_address)
    fip = self._get_floating_ip_by_address(client, floating_address)
    param = {'port_id': port_id,
             'fixed_ip_address': fixed_address}
    try:
        client.update_floatingip(fip['id'], {'floatingip': param})
    except neutron_client_exc.Conflict as e:
        raise exception.FloatingIpAssociateFailed(str(e))

    # If the floating IP was associated with another server, try to refresh
    # the cache for that instance to avoid a window of time where multiple
    # servers in the API say they are using the same floating IP.
    if fip['port_id']:
        # Trap and log any errors from
        # _update_inst_info_cache_for_disassociated_fip but not let them
        # raise back up to the caller since this refresh is best effort.
        try:
            self._update_inst_info_cache_for_disassociated_fip(
                context, instance, client, fip)
        except Exception as e:
            LOG.warning('An error occurred while trying to refresh the '
                        'network info cache for an instance associated '
                        'with port %s. Error: %s', fip['port_id'], e)

2801 

def _update_inst_info_cache_for_disassociated_fip(self, context,
                                                  instance, client, fip):
    """Update the network info cache when a floating IP is re-assigned.

    :param context: nova auth RequestContext
    :param instance: The instance to which the floating IP is now assigned
    :param client: ClientWrapper instance for using the Neutron API
    :param fip: dict for the floating IP that was re-assigned where the
        ``port_id`` value represents the port that was
        associated with another server.
    """
    port = self._show_port(context, fip['port_id'],
                           neutron_client=client)
    orig_instance_uuid = port['device_id']

    msg_dict = dict(address=fip['floating_ip_address'],
                    instance_id=orig_instance_uuid)
    LOG.info('re-assign floating IP %(address)s from '
             'instance %(instance_id)s', msg_dict,
             instance=instance)
    orig_instance = self._get_instance_by_uuid_using_api_db(
        context, orig_instance_uuid)
    if orig_instance:
        # purge cached nw info for the original instance; pass the
        # context from the instance in case we found it in another cell
        update_instance_cache_with_nw_info(
            self, orig_instance._context, orig_instance)
    else:
        # Leave a breadcrumb about not being able to refresh the
        # cache for the original instance.
        LOG.info('Unable to refresh the network info cache for '
                 'instance %s after disassociating floating IP %s. '
                 'If the instance still exists, its info cache may '
                 'be healed automatically.',
                 orig_instance_uuid, fip['id'])

2837 

@staticmethod
def _get_instance_by_uuid_using_api_db(context, instance_uuid):
    """Look up the instance by UUID

    This method is meant to be used sparingly since it tries to find
    the instance by UUID in the cell-targeted context. If the instance
    is not found, this method will try to determine if it's not found
    because it is deleted or if it is just in another cell. Therefore
    it assumes to have access to the API database and should only be
    called from methods that are used in the control plane services.

    :param context: cell-targeted nova auth RequestContext
    :param instance_uuid: UUID of the instance to find
    :returns: Instance object if the instance was found, else None.
    """
    try:
        return objects.Instance.get_by_uuid(context, instance_uuid)
    except exception.InstanceNotFound:
        # The instance could be deleted or it could be in another cell.
        # To determine if its in another cell, check the instance
        # mapping in the API DB.
        try:
            inst_map = objects.InstanceMapping.get_by_instance_uuid(
                context, instance_uuid)
        except exception.InstanceMappingNotFound:
            # The instance is gone so just return.
            return

        # We have the instance mapping, look up the instance in the
        # cell the instance is in.
        with nova_context.target_cell(
                context, inst_map.cell_mapping) as cctxt:
            try:
                return objects.Instance.get_by_uuid(cctxt, instance_uuid)
            except exception.InstanceNotFound:
                # Alright it's really gone.
                return

2875 

def get_all(self, context):
    """Return every network visible to the requesting client."""
    neutron = get_client(context)
    listing = neutron.list_networks()
    return listing.get('networks')

2880 

def get(self, context, network_uuid):
    """Return the requested network for the client.

    :raises: NetworkNotFound when neutron does not know the network.
    """
    neutron = get_client(context)
    try:
        response = neutron.show_network(network_uuid)
    except neutron_client_exc.NetworkNotFoundClient:
        raise exception.NetworkNotFound(network_id=network_uuid)
    # Normalize a missing/empty 'network' payload to an empty dict.
    return response.get('network') or {}

2888 

def get_fixed_ip_by_address(self, context, address):
    """Return instance uuids given an address.

    :raises: FixedIpNotFoundForAddress when no instance uses the
        address; FixedIpAssociatedWithMultipleInstances when more
        than one does.
    """
    matches = self._get_instance_uuids_by_ip(context, address)
    if not matches:
        raise exception.FixedIpNotFoundForAddress(address=address)
    if len(matches) > 1:
        raise exception.FixedIpAssociatedWithMultipleInstances(
            address=address)
    return matches[0]

2899 

def get_floating_ip(self, context, id):
    """Return floating IP object given the floating IP id.

    :param context: The request context.
    :param id: The id of the floating IP in neutron.
    :raises: FloatingIpNotFound when neutron returns a 404;
        NetworkNotFound when the IP's network cannot be shown.
    """
    client = get_client(context)
    try:
        fip = client.show_floatingip(id)['floatingip']
    except neutron_client_exc.NeutronClientException as e:
        if e.status_code == 404:
            raise exception.FloatingIpNotFound(id=id)

        # Anything other than a 404 is unexpected: log and re-raise.
        with excutils.save_and_reraise_exception():
            LOG.exception('Unable to access floating IP %s', id)

    # retrieve and cache the network details now since many callers need
    # the network name which isn't present in the response from neutron
    network_uuid = fip['floating_network_id']
    try:
        fip['network_details'] = client.show_network(
            network_uuid)['network']
    except neutron_client_exc.NetworkNotFoundClient:
        raise exception.NetworkNotFound(network_id=network_uuid)

    # ...and retrieve the port details for the same reason, but only if
    # they're not already there because the fip-port-details extension is
    # present
    if not self.has_fip_port_details_extension(client=client):
        port_id = fip['port_id']
        try:
            fip['port_details'] = client.show_port(
                port_id)['port']
        except neutron_client_exc.PortNotFoundClient:
            # it's possible to create floating IPs without a port
            fip['port_details'] = None

    return fip

2934 

def get_floating_ip_by_address(self, context, address):
    """Return a floating IP given an address.

    :param context: The request context.
    :param address: The floating IP address to look up.
    :raises: NetworkNotFound when the IP's network cannot be shown.
    """
    client = get_client(context)
    fip = self._get_floating_ip_by_address(client, address)

    # retrieve and cache the network details now since many callers need
    # the network name which isn't present in the response from neutron
    network_uuid = fip['floating_network_id']
    try:
        fip['network_details'] = client.show_network(
            network_uuid)['network']
    except neutron_client_exc.NetworkNotFoundClient:
        raise exception.NetworkNotFound(network_id=network_uuid)

    # ...and retrieve the port details for the same reason, but only if
    # they're not already there because the fip-port-details extension is
    # present
    if not self.has_fip_port_details_extension(client=client):
        port_id = fip['port_id']
        try:
            fip['port_details'] = client.show_port(
                port_id)['port']
        except neutron_client_exc.PortNotFoundClient:
            # it's possible to create floating IPs without a port
            fip['port_details'] = None

    return fip

2962 

2963 def get_floating_ip_pools(self, context): 

2964 """Return floating IP pools a.k.a. external networks.""" 

2965 client = get_client(context) 

2966 data = client.list_networks(**{constants.NET_EXTERNAL: True}) 

2967 return data['networks'] 

2968 

2969 def get_floating_ips_by_project(self, context): 

2970 client = get_client(context) 

2971 project_id = context.project_id 

2972 fips = self._safe_get_floating_ips(client, tenant_id=project_id) 

2973 if not fips: 

2974 return fips 

2975 

2976 # retrieve and cache the network details now since many callers need 

2977 # the network name which isn't present in the response from neutron 

2978 networks = {net['id']: net for net in self._get_available_networks( 

2979 context, project_id, [fip['floating_network_id'] for fip in fips], 

2980 client)} 

2981 for fip in fips: 

2982 network_uuid = fip['floating_network_id'] 

2983 if network_uuid not in networks: 2983 ↛ 2984line 2983 didn't jump to line 2984 because the condition on line 2983 was never true

2984 raise exception.NetworkNotFound(network_id=network_uuid) 

2985 

2986 fip['network_details'] = networks[network_uuid] 

2987 

2988 # ...and retrieve the port details for the same reason, but only if 

2989 # they're not already there because the fip-port-details extension is 

2990 # present 

2991 if not self.has_fip_port_details_extension(client=client): 

2992 ports = {port['id']: port for port in client.list_ports( 

2993 **{'tenant_id': project_id})['ports']} 

2994 for fip in fips: 

2995 port_id = fip['port_id'] 

2996 if port_id in ports: 

2997 fip['port_details'] = ports[port_id] 

2998 else: 

2999 # it's possible to create floating IPs without a port 

3000 fip['port_details'] = None 

3001 

3002 return fips 

3003 

3004 def get_instance_id_by_floating_address(self, context, address): 

3005 """Return the instance id a floating IP's fixed IP is allocated to.""" 

3006 client = get_client(context) 

3007 fip = self._get_floating_ip_by_address(client, address) 

3008 if not fip['port_id']: 

3009 return None 

3010 

3011 try: 

3012 port = self._show_port(context, fip['port_id'], 

3013 neutron_client=client) 

3014 except exception.PortNotFound: 

3015 # NOTE: Here is a potential race condition between _show_port() and 

3016 # _get_floating_ip_by_address(). fip['port_id'] shows a port which 

3017 # is the server instance's. At _get_floating_ip_by_address(), 

3018 # Neutron returns the list which includes the instance. Just after 

3019 # that, the deletion of the instance happens and Neutron returns 

3020 # 404 on _show_port(). 

3021 LOG.debug('The port(%s) is not found', fip['port_id']) 

3022 return None 

3023 

3024 return port['device_id'] 

3025 

3026 def get_vifs_by_instance(self, context, instance): 

3027 return objects.VirtualInterfaceList.get_by_instance_uuid(context, 

3028 instance.uuid) 

3029 

3030 def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id): 

3031 search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'} 

3032 if uuidutils.is_uuid_like(name_or_id): 

3033 search_opts.update({'id': name_or_id}) 

3034 else: 

3035 search_opts.update({'name': name_or_id}) 

3036 data = client.list_networks(**search_opts) 

3037 nets = data['networks'] 

3038 

3039 if len(nets) == 1: 3039 ↛ 3041line 3039 didn't jump to line 3041 because the condition on line 3039 was always true

3040 return nets[0]['id'] 

3041 elif len(nets) == 0: 

3042 raise exception.FloatingIpPoolNotFound() 

3043 else: 

3044 msg = (_("Multiple floating IP pools matches found for name '%s'") 

3045 % name_or_id) 

3046 raise exception.NovaException(message=msg) 

3047 

3048 def allocate_floating_ip(self, context, pool=None): 

3049 """Add a floating IP to a project from a pool.""" 

3050 client = get_client(context) 

3051 pool = pool or CONF.neutron.default_floating_pool 

3052 pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool) 

3053 

3054 param = {'floatingip': {'floating_network_id': pool_id}} 

3055 try: 

3056 fip = client.create_floatingip(param) 

3057 except (neutron_client_exc.IpAddressGenerationFailureClient, 

3058 neutron_client_exc.ExternalIpAddressExhaustedClient) as e: 

3059 raise exception.NoMoreFloatingIps(str(e)) 

3060 except neutron_client_exc.OverQuotaClient as e: 

3061 raise exception.FloatingIpLimitExceeded(str(e)) 

3062 except neutron_client_exc.BadRequest as e: 

3063 raise exception.FloatingIpBadRequest(str(e)) 

3064 

3065 return fip['floatingip']['floating_ip_address'] 

3066 

3067 def _safe_get_floating_ips(self, client, **kwargs): 

3068 """Get floating IP gracefully handling 404 from Neutron.""" 

3069 try: 

3070 return client.list_floatingips(**kwargs)['floatingips'] 

3071 # If a neutron plugin does not implement the L3 API a 404 from 

3072 # list_floatingips will be raised. 

3073 except neutron_client_exc.NotFound: 

3074 return [] 

3075 except neutron_client_exc.NeutronClientException as e: 

3076 # bug/1513879 neutron client is currently using 

3077 # NeutronClientException when there is no L3 API 

3078 if e.status_code == 404: 

3079 return [] 

3080 with excutils.save_and_reraise_exception(): 

3081 LOG.exception('Unable to access floating IP for %s', 

3082 ', '.join(['%s %s' % (k, v) 

3083 for k, v in kwargs.items()])) 

3084 

3085 def _get_floating_ip_by_address(self, client, address): 

3086 """Get floating IP from floating IP address.""" 

3087 if not address: 3087 ↛ 3088line 3087 didn't jump to line 3088 because the condition on line 3087 was never true

3088 raise exception.FloatingIpNotFoundForAddress(address=address) 

3089 fips = self._safe_get_floating_ips(client, floating_ip_address=address) 

3090 if len(fips) == 0: 

3091 raise exception.FloatingIpNotFoundForAddress(address=address) 

3092 elif len(fips) > 1: 

3093 raise exception.FloatingIpMultipleFoundForAddress(address=address) 

3094 return fips[0] 

3095 

3096 def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port): 

3097 """Get floating IPs from fixed IP and port.""" 

3098 return self._safe_get_floating_ips(client, fixed_ip_address=fixed_ip, 

3099 port_id=port) 

3100 

    def release_floating_ip(self, context, address,
                            affect_auto_assigned=False):
        """Remove a floating IP with the given address from a project.

        :param context: the request context
        :param address: the floating IP address to release
        :param affect_auto_assigned: unused; kept for interface
            compatibility only (see note below)
        """

        # Note(amotoki): We cannot handle a case where multiple pools
        # have overlapping IP address range. In this case we cannot use
        # 'address' as a unique key.
        # This is a limitation of the current nova.

        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.

        self._release_floating_ip(context, address)

3115 

    def disassociate_and_release_floating_ip(self, context, instance,
                                             floating_ip):
        """Removes (deallocates) and deletes the floating IP.

        This api call was added to allow this to be done in one operation
        if using neutron.
        """

        # The inner function is decorated with @refresh_cache so that the
        # instance's network info cache is rebuilt after the floating IP
        # is deleted; it is only usable when an instance is supplied.
        @refresh_cache
        def _release_floating_ip_and_refresh_cache(self, context, instance,
                                                   floating_ip):
            self._release_floating_ip(
                context, floating_ip['floating_ip_address'],
                raise_if_associated=False)

        if instance:
            _release_floating_ip_and_refresh_cache(self, context, instance,
                                                   floating_ip)
        else:
            # No instance to refresh a cache for; just delete the IP.
            self._release_floating_ip(
                context, floating_ip['floating_ip_address'],
                raise_if_associated=False)

3138 

3139 def _release_floating_ip(self, context, address, 

3140 raise_if_associated=True): 

3141 client = get_client(context) 

3142 fip = self._get_floating_ip_by_address(client, address) 

3143 

3144 if raise_if_associated and fip['port_id']: 

3145 raise exception.FloatingIpAssociated(address=address) 

3146 try: 

3147 client.delete_floatingip(fip['id']) 

3148 except neutron_client_exc.NotFound: 

3149 raise exception.FloatingIpNotFoundForAddress( 

3150 address=address 

3151 ) 

3152 

    @refresh_cache
    def disassociate_floating_ip(self, context, instance, address,
                                 affect_auto_assigned=False):
        """Disassociate a floating IP from the instance.

        :param context: the request context
        :param instance: the instance whose info cache is refreshed by the
            @refresh_cache decorator after the update
        :param address: the floating IP address to disassociate
        :param affect_auto_assigned: unused; kept for interface
            compatibility only (see note below)
        """

        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.

        # Clearing port_id on the floating IP detaches it from the port.
        client = get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})

3165 

    def migrate_instance_start(self, context, instance, migration):
        """Start to migrate the network of an instance.

        If the instance has port bindings on the destination compute host,
        they are activated in this method which will atomically change the
        source compute host port binding to inactive and also change the port
        "binding:host_id" attribute to the destination host.

        If there are no binding resources for the attached ports on the given
        destination host, this method is a no-op.

        :param context: The user request context.
        :param instance: The instance being migrated.
        :param migration: dict with required keys::

            "source_compute": The name of the source compute host.
            "dest_compute": The name of the destination compute host.

        :raises: nova.exception.PortBindingActivationFailed if any port binding
            activation fails
        """
        if not self.has_port_binding_extension(context):
            # If neutron isn't new enough yet for the port "binding-extended"
            # API extension, we just no-op. The port binding host will be
            # be updated in migrate_instance_finish, which is functionally OK,
            # it's just not optimal.
            LOG.debug('Neutron is not new enough to perform early destination '
                      'host port binding activation. Port bindings will be '
                      'updated later.', instance=instance)
            return

        # Port bindings are managed with an admin client.
        client = get_client(context, admin=True)
        dest_host = migration.dest_compute
        for vif in instance.get_network_info():
            # Not all compute migration flows use the port binding-extended
            # API yet, so first check to see if there is a binding for the
            # port and destination host.
            try:
                binding = client.show_port_binding(
                    vif['id'], dest_host
                )['binding']
            except neutron_client_exc.NeutronClientException as exc:
                if exc.status_code != 404:
                    # We don't raise an exception here because we assume that
                    # port bindings will be updated correctly when
                    # migrate_instance_finish runs
                    LOG.error(
                        'Unexpected error trying to get binding info '
                        'for port %s and destination host %s.',
                        vif['id'], dest_host, exc_info=True)
                    continue

                # ...but if there is no port binding record for the destination
                # host, we can safely assume none of the ports attached to the
                # instance are using the binding-extended API in this flow and
                # exit early.
                return

            if binding['status'] == 'ACTIVE':
                # We might be racing with another thread that's handling
                # post-migrate operations and already activated the port
                # binding for the destination host.
                LOG.debug(
                    'Port %s binding to destination host %s is already ACTIVE',
                    vif['id'], dest_host, instance=instance)
                continue

            try:
                # This is a bit weird in that we don't PUT and update the
                # status to ACTIVE, it's more like a POST action method in the
                # compute API.
                client.activate_port_binding(vif['id'], dest_host)
                LOG.debug(
                    'Activated binding for port %s and host %s',
                    vif['id'], dest_host)
            except neutron_client_exc.NeutronClientException as exc:
                # A 409 means the port binding is already active, which
                # shouldn't happen if the caller is doing things in the correct
                # order.
                if exc.status_code == 409:
                    LOG.warning(
                        'Binding for port %s and host %s is already active',
                        vif['id'], dest_host, exc_info=True)
                    continue

                # Log the details, raise an exception.
                LOG.error(
                    'Unexpected error trying to activate binding '
                    'for port %s and host %s.',
                    vif['id'], dest_host, exc_info=True)
                raise exception.PortBindingActivationFailed(
                    port_id=vif['id'], host=dest_host)

            # TODO(mriedem): Do we need to call
            # _clear_migration_port_profile? migrate_instance_finish
            # would normally take care of clearing the "migrating_to"
            # attribute on each port when updating the port's
            # binding:host_id to point to the destination host.

3264 

3265 def migrate_instance_finish( 

3266 self, context, instance, migration, provider_mappings): 

3267 """Finish migrating the network of an instance. 

3268 

3269 :param context: nova auth request context 

3270 :param instance: Instance object being migrated 

3271 :param migration: Migration object for the operation; used to determine 

3272 the phase of the migration which dictates what to do with claimed 

3273 PCI devices for SR-IOV ports 

3274 :param provider_mappings: a dict of list of resource provider uuids 

3275 keyed by port uuid 

3276 """ 

3277 self._update_port_binding_for_instance( 

3278 context, instance, migration.dest_compute, migration=migration, 

3279 provider_mappings=provider_mappings) 

3280 

3281 def _nw_info_get_ips(self, client, port): 

3282 network_IPs = [] 

3283 for fixed_ip in port['fixed_ips']: 

3284 fixed = network_model.FixedIP(address=fixed_ip['ip_address']) 

3285 floats = self._get_floating_ips_by_fixed_and_port( 

3286 client, fixed_ip['ip_address'], port['id']) 

3287 for ip in floats: 

3288 fip = network_model.IP(address=ip['floating_ip_address'], 

3289 type='floating') 

3290 fixed.add_floating_ip(fip) 

3291 network_IPs.append(fixed) 

3292 return network_IPs 

3293 

3294 def _nw_info_get_subnets(self, context, port, network_IPs, client=None): 

3295 subnets = self._get_subnets_from_port(context, port, client) 

3296 for subnet in subnets: 

3297 subnet['ips'] = [fixed_ip for fixed_ip in network_IPs 

3298 if fixed_ip.is_in_subnet(subnet)] 

3299 return subnets 

3300 

    def _nw_info_build_network(self, context, port, networks, subnets):
        """Build a network model (and OVS interface id) for a port.

        :param context: the request context
        :param port: neutron port dict used to derive the network metadata
        :param networks: list of neutron network dicts searched for the
            port's network
        :param subnets: subnet models attached to the returned network
        :returns: tuple of (network_model.Network, ovs_interfaceid or None)
        """
        # TODO(stephenfin): Pass in an existing admin client if available.
        neutron = get_client(context, admin=True)
        network_name = None
        network_mtu = None
        # for/else: if no network in the list matches the port's network,
        # fall back to the port's own tenant_id and warn.
        for net in networks:
            if port['network_id'] == net['id']:
                network_name = net['name']
                tenant_id = net['tenant_id']
                network_mtu = net.get('mtu')
                break
        else:
            tenant_id = port['tenant_id']
            LOG.warning("Network %(id)s not matched with the tenants "
                        "network! The ports tenant %(tenant_id)s will be "
                        "used.",
                        {'id': port['network_id'], 'tenant_id': tenant_id})

        bridge = None
        ovs_interfaceid = None
        # Network model metadata
        should_create_bridge = None
        vif_type = port.get('binding:vif_type')
        port_details = port.get('binding:vif_details', {})
        # Derive the bridge name (and OVS interface id, where applicable)
        # from the vif type reported by neutron.
        if vif_type in [network_model.VIF_TYPE_OVS,
                        network_model.VIF_TYPE_AGILIO_OVS]:
            bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
                                      CONF.neutron.ovs_bridge)
            ovs_interfaceid = port['id']
        elif vif_type == network_model.VIF_TYPE_BRIDGE:
            bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
                                      "brq" + port['network_id'])
            should_create_bridge = True
        elif vif_type == network_model.VIF_TYPE_DVS:
            # The name of the DVS port group will contain the neutron
            # network id
            bridge = port['network_id']
        elif (vif_type == network_model.VIF_TYPE_VHOSTUSER and
                port_details.get(network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
                                 False)):
            bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
                                      CONF.neutron.ovs_bridge)
            ovs_interfaceid = port['id']
        elif (vif_type == network_model.VIF_TYPE_VHOSTUSER and
              port_details.get(network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG,
                               False)):
            bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
                                      "brq" + port['network_id'])

        # Prune the bridge name if necessary. For the DVS this is not done
        # as the bridge is a '<network-name>-<network-UUID>'.
        if bridge is not None and vif_type != network_model.VIF_TYPE_DVS:
            bridge = bridge[:network_model.NIC_NAME_LEN]

        physnet, tunneled = self._get_physnet_tunneled_info(
            context, neutron, port['network_id'])
        network = network_model.Network(
            id=port['network_id'],
            bridge=bridge,
            injected=CONF.flat_injected,
            label=network_name,
            tenant_id=tenant_id,
            mtu=network_mtu,
            physical_network=physnet,
            tunneled=tunneled
        )
        network['subnets'] = subnets

        if should_create_bridge is not None:
            network['should_create_bridge'] = should_create_bridge
        return network, ovs_interfaceid

3372 

3373 def _get_preexisting_port_ids(self, instance): 

3374 """Retrieve the preexisting ports associated with the given instance. 

3375 These ports were not created by nova and hence should not be 

3376 deallocated upon instance deletion. 

3377 """ 

3378 net_info = instance.get_network_info() 

3379 if not net_info: 

3380 LOG.debug('Instance cache missing network info.', 

3381 instance=instance) 

3382 return [vif['id'] for vif in net_info 

3383 if vif.get('preserve_on_delete')] 

3384 

3385 def _build_vif_model(self, context, client, current_neutron_port, 

3386 networks, preexisting_port_ids): 

3387 """Builds a ``nova.network.model.VIF`` object based on the parameters 

3388 and current state of the port in Neutron. 

3389 

3390 :param context: Request context. 

3391 :param client: Neutron client. 

3392 :param current_neutron_port: The current state of a Neutron port 

3393 from which to build the VIF object model. 

3394 :param networks: List of dicts which represent Neutron networks 

3395 associated with the ports currently attached to a given server 

3396 instance. 

3397 :param preexisting_port_ids: List of IDs of ports attached to a 

3398 given server instance which Nova did not create and therefore 

3399 should not delete when the port is detached from the server. 

3400 :return: nova.network.model.VIF object which represents a port in the 

3401 instance network info cache. 

3402 """ 

3403 vif_active = False 

3404 if (current_neutron_port['admin_state_up'] is False or 

3405 current_neutron_port['status'] == 'ACTIVE'): 

3406 vif_active = True 

3407 

3408 network_IPs = self._nw_info_get_ips(client, 

3409 current_neutron_port) 

3410 subnets = self._nw_info_get_subnets(context, 

3411 current_neutron_port, 

3412 network_IPs, client) 

3413 

3414 devname = "tap" + current_neutron_port['id'] 

3415 devname = devname[:network_model.NIC_NAME_LEN] 

3416 

3417 network, ovs_interfaceid = ( 

3418 self._nw_info_build_network(context, current_neutron_port, 

3419 networks, subnets)) 

3420 preserve_on_delete = (current_neutron_port['id'] in 

3421 preexisting_port_ids) 

3422 

3423 return network_model.VIF( 

3424 id=current_neutron_port['id'], 

3425 address=current_neutron_port['mac_address'], 

3426 network=network, 

3427 vnic_type=current_neutron_port.get('binding:vnic_type', 

3428 network_model.VNIC_TYPE_NORMAL), 

3429 type=current_neutron_port.get('binding:vif_type'), 

3430 profile=get_binding_profile(current_neutron_port), 

3431 details=current_neutron_port.get('binding:vif_details'), 

3432 ovs_interfaceid=ovs_interfaceid, 

3433 devname=devname, 

3434 active=vif_active, 

3435 preserve_on_delete=preserve_on_delete, 

3436 delegate_create=True, 

3437 ) 

3438 

3439 def _log_error_if_vnic_type_changed( 

3440 self, port_id, old_vnic_type, new_vnic_type, instance 

3441 ): 

3442 if old_vnic_type and old_vnic_type != new_vnic_type: 3442 ↛ 3443line 3442 didn't jump to line 3443 because the condition on line 3442 was never true

3443 LOG.error( 

3444 'The vnic_type of the bound port %s has ' 

3445 'been changed in neutron from "%s" to ' 

3446 '"%s". Changing vnic_type of a bound port ' 

3447 'is not supported by Nova. To avoid ' 

3448 'breaking the connectivity of the instance ' 

3449 'please change the port vnic_type back to ' 

3450 '"%s".', 

3451 port_id, 

3452 old_vnic_type, 

3453 new_vnic_type, 

3454 old_vnic_type, 

3455 instance=instance 

3456 ) 

3457 

    def _build_network_info_model(self, context, instance, networks=None,
                                  port_ids=None, admin_client=None,
                                  preexisting_port_ids=None,
                                  refresh_vif_id=None, force_refresh=False):
        """Return list of ordered VIFs attached to instance.

        :param context: Request context.
        :param instance: Instance we are returning network info for.
        :param networks: List of networks being attached to an instance.
                         If value is None this value will be populated
                         from the existing cached value.
        :param port_ids: List of port_ids that are being attached to an
                         instance in order of attachment. If value is None
                         this value will be populated from the existing
                         cached value.
        :param admin_client: A neutron client for the admin context.
        :param preexisting_port_ids: List of port_ids that nova didn't
                        allocate and there shouldn't be deleted when
                        an instance is de-allocated. Supplied list will
                        be added to the cached list of preexisting port
                        IDs for this instance.
        :param refresh_vif_id: Optional port ID to refresh within the existing
                        cache rather than the entire cache. This can be
                        triggered via a "network-changed" server external event
                        from Neutron.
        :param force_refresh: If ``networks`` and ``port_ids`` are both None,
                        by default the instance.info_cache will be used to
                        populate the network info. Pass ``True`` to force
                        collection of ports and networks from neutron directly.
        """

        # Ports are looked up by the instance's project and device_id.
        search_opts = {'tenant_id': instance.project_id,
                       'device_id': instance.uuid, }
        if admin_client is None:
            client = get_client(context, admin=True)
        else:
            client = admin_client

        data = client.list_ports(**search_opts)

        current_neutron_ports = data.get('ports', [])

        # Merge the caller-supplied preexisting port IDs with those
        # already recorded in the instance's network info cache.
        if preexisting_port_ids is None:
            preexisting_port_ids = []
        preexisting_port_ids = set(
            preexisting_port_ids + self._get_preexisting_port_ids(instance))

        # Index the current neutron ports by ID for quick lookup below.
        current_neutron_port_map = {}
        for current_neutron_port in current_neutron_ports:
            current_neutron_port_map[current_neutron_port['id']] = (
                current_neutron_port)

        # Figure out what kind of operation we're processing. If we're given
        # a single port to refresh then we try to optimize and update just the
        # information for that VIF in the existing cache rather than try to
        # rebuild the entire thing.
        if refresh_vif_id is not None:
            # TODO(mriedem): Consider pulling this out into it's own method.
            nw_info = instance.get_network_info()
            if nw_info:
                current_neutron_port = current_neutron_port_map.get(
                    refresh_vif_id)
                if current_neutron_port:
                    # Get the network for the port.
                    networks = self._get_available_networks(
                        context, instance.project_id,
                        [current_neutron_port['network_id']], client)
                    # Build the VIF model given the latest port information.
                    refreshed_vif = self._build_vif_model(
                        context, client, current_neutron_port, networks,
                        preexisting_port_ids)
                    for index, vif in enumerate(nw_info):
                        if vif['id'] == refresh_vif_id:
                            self._log_error_if_vnic_type_changed(
                                vif['id'],
                                vif['vnic_type'],
                                refreshed_vif['vnic_type'],
                                instance,
                            )
                            # Update the existing entry.
                            nw_info[index] = refreshed_vif
                            LOG.debug('Updated VIF entry in instance network '
                                      'info cache for port %s.',
                                      refresh_vif_id, instance=instance)
                            break
                    else:
                        # If it wasn't in the existing cache, add it.
                        nw_info.append(refreshed_vif)
                        LOG.debug('Added VIF to instance network info cache '
                                  'for port %s.', refresh_vif_id,
                                  instance=instance)
                else:
                    # This port is no longer associated with the instance, so
                    # simply remove it from the nw_info cache.
                    for index, vif in enumerate(nw_info):
                        if vif['id'] == refresh_vif_id:
                            LOG.info('Port %s from network info_cache is no '
                                     'longer associated with instance in '
                                     'Neutron. Removing from network '
                                     'info_cache.', refresh_vif_id,
                                     instance=instance)
                            del nw_info[index]
                            break
                return nw_info
            # else there is no existing cache and we need to build it

        # Determine if we're doing a full refresh (_heal_instance_info_cache)
        # or if we are refreshing because we have attached/detached a port.
        # TODO(mriedem); we should leverage refresh_vif_id in the latter case
        # since we are unnecessarily rebuilding the entire cache for one port
        nw_info_refresh = networks is None and port_ids is None
        if nw_info_refresh and force_refresh:
            # Use the current set of ports from neutron rather than the cache.
            port_ids = self._get_ordered_port_list(context, instance,
                                                   current_neutron_ports)
            net_ids = [
                current_neutron_port_map.get(port_id, {}).get('network_id')
                for port_id in port_ids]

            # This is copied from _gather_port_ids_and_networks.
            networks = self._get_available_networks(
                context, instance.project_id, net_ids, client)
        else:
            # We are refreshing the full cache using the existing cache rather
            # than what is currently in neutron.
            networks, port_ids = self._gather_port_ids_and_networks(
                context, instance, networks, port_ids, client)

        # Rebuild the full cache, warning about any vnic_type drift
        # between the old cached VIF and the freshly built one.
        old_nw_info = instance.get_network_info()
        nw_info = network_model.NetworkInfo()
        for port_id in port_ids:
            current_neutron_port = current_neutron_port_map.get(port_id)
            if current_neutron_port:
                vif = self._build_vif_model(
                    context, client, current_neutron_port, networks,
                    preexisting_port_ids)
                for old_vif in old_nw_info:
                    if old_vif['id'] == port_id:
                        self._log_error_if_vnic_type_changed(
                            port_id,
                            old_vif['vnic_type'],
                            vif['vnic_type'],
                            instance,
                        )
                nw_info.append(vif)
            elif nw_info_refresh:
                LOG.info('Port %s from network info_cache is no '
                         'longer associated with instance in Neutron. '
                         'Removing from network info_cache.', port_id,
                         instance=instance)

        return nw_info

3610 

3611 def _get_ordered_port_list(self, context, instance, current_neutron_ports): 

3612 """Returns ordered port list using nova virtual_interface data.""" 

3613 

3614 # a dict, keyed by port UUID, of the port's "index" 

3615 # so that we can order the returned port UUIDs by the 

3616 # original insertion order followed by any newly-attached 

3617 # ports 

3618 port_uuid_to_index_map = {} 

3619 port_order_list = [] 

3620 ports_without_order = [] 

3621 

3622 # Get set of ports from nova vifs 

3623 vifs = self.get_vifs_by_instance(context, instance) 

3624 for port in current_neutron_ports: 

3625 # NOTE(mjozefcz): For each port check if we have its index from 

3626 # nova virtual_interfaces objects. If not - it seems 

3627 # to be a new port - add it at the end of list. 

3628 

3629 # Find port index if it was attached before. 

3630 for vif in vifs: 

3631 if vif.uuid == port['id']: 

3632 port_uuid_to_index_map[port['id']] = vif.id 

3633 break 

3634 

3635 if port['id'] not in port_uuid_to_index_map: 

3636 # Assume that it's new port and add it to the end of port list. 

3637 ports_without_order.append(port['id']) 

3638 

3639 # Lets sort created port order_list by given index. 

3640 port_order_list = sorted(port_uuid_to_index_map, 

3641 key=lambda k: port_uuid_to_index_map[k]) 

3642 

3643 # Add ports without order to the end of list 

3644 port_order_list.extend(ports_without_order) 

3645 

3646 return port_order_list 

3647 

    def _get_subnets_from_port(self, context, port, client=None):
        """Return the subnets for a given port.

        :param context: the request context, used to build a client when
            one is not supplied
        :param port: neutron port dict whose ``fixed_ips`` determine which
            subnets are looked up
        :param client: optional neutron client to reuse
        :returns: list of network_model.Subnet objects (possibly empty)
        """

        fixed_ips = port['fixed_ips']
        # No fixed_ips for the port means there is no subnet associated
        # with the network the port is created on.
        # Since list_subnets(id=[]) returns all subnets visible for the
        # current tenant, returned subnets may contain subnets which is not
        # related to the port. To avoid this, the method returns here.
        if not fixed_ips:
            return []
        if not client:
            client = get_client(context)
        # De-duplicate subnet IDs since several fixed IPs may share one.
        search_opts = {'id': list(set(ip['subnet_id'] for ip in fixed_ips))}
        data = client.list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        subnets = []

        for subnet in ipam_subnets:
            subnet_dict = {'cidr': subnet['cidr'],
                           'gateway': network_model.IP(
                               address=subnet['gateway_ip'],
                               type='gateway'),
                           'enable_dhcp': False,
                           }
            if subnet.get('ipv6_address_mode'):
                subnet_dict['ipv6_address_mode'] = subnet['ipv6_address_mode']

            # attempt to populate DHCP server field
            dhcp_search_opts = {
                'network_id': subnet['network_id'],
                'device_owner': 'network:dhcp'}
            data = client.list_ports(**dhcp_search_opts)
            dhcp_ports = data.get('ports', [])
            # NOTE(review): the break only exits the inner fixed_ips loop,
            # so if multiple DHCP ports match this subnet the last one wins
            # — presumably neutron returns at most one; confirm.
            for p in dhcp_ports:
                for ip_pair in p['fixed_ips']:
                    if ip_pair['subnet_id'] == subnet['id']:
                        subnet_dict['dhcp_server'] = ip_pair['ip_address']
                        break

            # NOTE(stblatzheim): If enable_dhcp is set on subnet, but subnet
            # has ovn native dhcp and no dhcp-agents. Network owner will be
            # network:distributed
            # Just rely on enable_dhcp flag given by neutron
            # Fix for https://bugs.launchpad.net/nova/+bug/2055245

            if subnet.get('enable_dhcp'):
                subnet_dict['enable_dhcp'] = True

            subnet_object = network_model.Subnet(**subnet_dict)
            for dns in subnet.get('dns_nameservers', []):
                subnet_object.add_dns(
                    network_model.IP(address=dns, type='dns'))

            for route in subnet.get('host_routes', []):
                subnet_object.add_route(
                    network_model.Route(cidr=route['destination'],
                                        gateway=network_model.IP(
                                            address=route['nexthop'],
                                            type='gateway')))

            subnets.append(subnet_object)
        return subnets

3711 

3712 def setup_instance_network_on_host( 

3713 self, context, instance, host, migration=None, 

3714 provider_mappings=None): 

3715 """Setup network for specified instance on host. 

3716 

3717 :param context: The request context. 

3718 :param instance: nova.objects.instance.Instance object. 

3719 :param host: The host which network should be setup for instance. 

3720 :param migration: The migration object if the instance is being 

3721 tracked with a migration. 

3722 :param provider_mappings: a dict of lists of resource provider uuids 

3723 keyed by port uuid 

3724 """ 

3725 self._update_port_binding_for_instance( 

3726 context, instance, host, migration, provider_mappings) 

3727 

3728 def cleanup_instance_network_on_host(self, context, instance, host): 

3729 """Cleanup network for specified instance on host. 

3730 

3731 Port bindings for the given host are deleted. The ports associated 

3732 with the instance via the port device_id field are left intact. 

3733 

3734 :param context: The user request context. 

3735 :param instance: Instance object with the associated ports 

3736 :param host: host from which to delete port bindings 

3737 :raises: PortBindingDeletionFailed if port binding deletion fails. 

3738 """ 

3739 # First check to see if the port binding extension is supported. 

3740 client = get_client(context) 

3741 if not self.has_port_binding_extension(client=client): 

3742 LOG.info("Neutron extension '%s' is not supported; not cleaning " 

3743 "up port bindings for host %s.", 

3744 constants.PORT_BINDING_EXTENDED, host, instance=instance) 

3745 return 

3746 # Now get the ports associated with the instance. We go directly to 

3747 # neutron rather than rely on the info cache just like 

3748 # setup_networks_on_host. 

3749 search_opts = {'device_id': instance.uuid, 

3750 'tenant_id': instance.project_id, 

3751 'fields': ['id']} # we only need the port id 

3752 data = self.list_ports(context, **search_opts) 

3753 self._delete_port_bindings(context, data['ports'], host) 

3754 

3755 def _get_pci_mapping_for_migration(self, instance, migration): 

3756 if not instance.migration_context: 

3757 return {} 

3758 # In case of revert, swap old and new devices to 

3759 # update the ports back to the original devices. 

3760 revert = migration and migration.status == 'reverted' 

3761 return instance.migration_context.get_pci_mapping_for_migration(revert) 

3762 

3763 def _get_port_pci_dev(self, instance, port): 

3764 """Find the PCI device corresponding to the port. 

3765 Assumes the port is an SRIOV one. 

3766 

3767 :param instance: The instance to which the port is attached. 

3768 :param port: The Neutron port, as obtained from the Neutron API 

3769 JSON form. 

3770 :return: The PciDevice object, or None if unable to find. 

3771 """ 

3772 # Find the port's PCIRequest, or return None 

3773 for r in instance.pci_requests.requests: 

3774 if r.requester_id == port['id']: 

3775 request = r 

3776 break 

3777 else: 

3778 LOG.debug('No PCI request found for port %s', port['id'], 

3779 instance=instance) 

3780 return None 

3781 # Find the request's device, or return None 

3782 for d in instance.pci_devices: 

3783 if d.request_id == request.request_id: 

3784 device = d 

3785 break 

3786 else: 

3787 LOG.debug('No PCI device found for request %s', 

3788 request.request_id, instance=instance) 

3789 return None 

3790 return device 

3791 

    def _update_port_binding_for_instance(
        self, context, instance, host, migration=None,
        provider_mappings=None):
        """Rebind the instance's neutron ports to ``host``.

        Lists all ports with device_id == instance.uuid via an admin
        neutron client and, for each port, accumulates a single update
        dict covering:

        * the binding host id and device_owner, when the port's current
          binding host differs from ``host`` or the binding is in a failed
          or unbound vif state;
        * SR-IOV PCI binding-profile details, taken from the migration's
          PCI mapping (non-live migrations) or from the instance's PCI
          request/device objects (no migration object, e.g. unshelve);
        * the resource allocation key in the binding profile for ports
          with a resource request (skipped for live migrations, where the
          conductor already set it).

        All updates are collected first and applied afterwards, one
        update_port call per port.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param host: The host to bind the ports to.
        :param migration: The migration object, if any (None for unshelve).
        :param provider_mappings: dict of lists of resource provider uuids
            keyed by port uuid; required for ports with a resource request.
        :raises: PortUpdateFailed if a PCI slot cannot be correlated or
            provider mappings are missing for a port with a resource
            request. Errors from neutron.update_port are logged and
            re-raised.
        """

        neutron = get_client(context, admin=True)
        search_opts = {'device_id': instance.uuid,
                       'tenant_id': instance.project_id}
        data = neutron.list_ports(**search_opts)
        port_updates = []
        ports = data['ports']
        FAILED_VIF_TYPES = (network_model.VIF_TYPE_UNBOUND,
                            network_model.VIF_TYPE_BINDING_FAILED)
        for p in ports:
            updates = {}
            # NOTE: binding_profile is mutated in place by several of the
            # branches below; the same dict instance may be stored into
            # ``updates`` more than once.
            binding_profile = get_binding_profile(p)

            # We need to update the port binding if the host has changed or if
            # the binding is clearly wrong due to previous lost messages.
            vif_type = p.get('binding:vif_type')
            if (p.get(constants.BINDING_HOST_ID) != host or
                    vif_type in FAILED_VIF_TYPES):

                updates[constants.BINDING_HOST_ID] = host
                # If the host changed, the AZ could have also changed so we
                # need to update the device_owner.
                updates['device_owner'] = (
                    'compute:%s' % instance.availability_zone)
                # NOTE: Before updating the port binding make sure we
                # remove the pre-migration status from the binding profile
                if binding_profile.get(constants.MIGRATING_ATTR):
                    del binding_profile[constants.MIGRATING_ATTR]
                    updates[constants.BINDING_PROFILE] = binding_profile

            # Update port with newly allocated PCI devices. Even if the
            # resize is happening on the same host, a new PCI device can be
            # allocated. Note that this only needs to happen if a migration
            # is in progress such as in a resize / migrate. It is possible
            # that this function is called without a migration object, such
            # as in an unshelve operation.
            vnic_type = p.get('binding:vnic_type')
            if vnic_type in network_model.VNIC_TYPES_SRIOV:
                # NOTE(artom) For migrations, update the binding profile from
                # the migration object...
                if migration is not None:
                    # NOTE(artom) ... except for live migrations, because the
                    # conductor has already done that when calling
                    # bind_ports_to_host().
                    if not migration.is_live_migration:
                        pci_mapping = self._get_pci_mapping_for_migration(
                            instance, migration)

                        pci_slot = binding_profile.get('pci_slot')
                        new_dev = pci_mapping.get(pci_slot)
                        if new_dev:
                            binding_profile.update(
                                self._get_pci_device_profile(new_dev))
                            updates[
                                constants.BINDING_PROFILE] = binding_profile
                        else:
                            # No new device maps to the port's current slot:
                            # the port cannot be rebound consistently.
                            raise exception.PortUpdateFailed(port_id=p['id'],
                                reason=_("Unable to correlate PCI slot %s") %
                                         pci_slot)
                # NOTE(artom) If migration is None, this is an unshelve, and we
                # need to figure out the pci related binding information from
                # the InstancePCIRequest and PciDevice objects.
                else:
                    pci_dev = self._get_port_pci_dev(instance, p)
                    if pci_dev:
                        binding_profile.update(
                            self._get_pci_device_profile(pci_dev))
                        updates[constants.BINDING_PROFILE] = binding_profile

            # NOTE(gibi): during live migration the conductor already sets the
            # allocation key in the port binding. However during resize, cold
            # migrate, evacuate and unshelve we have to set the binding here.
            # Also note that during unshelve no migration object is created.
            if self._has_resource_request(context, p, neutron) and (
                migration is None or not migration.is_live_migration
            ):
                if not provider_mappings:
                    # TODO(gibi): Remove this check when compute RPC API is
                    # bumped to 6.0
                    # NOTE(gibi): This should not happen as the API level
                    # minimum compute service version check ensures that the
                    # compute services already send the RequestSpec during
                    # the move operations between the source and the
                    # destination and the dest compute calculates the
                    # mapping based on that.
                    LOG.warning(
                        "Provider mappings are not available to the compute "
                        "service but are required for ports with a resource "
                        "request. If compute RPC API versions are pinned for "
                        "a rolling upgrade, you will need to retry this "
                        "operation once the RPC version is unpinned and the "
                        "nova-compute services are all upgraded.",
                        instance=instance)
                    raise exception.PortUpdateFailed(
                        port_id=p['id'],
                        reason=_(
                            "Provider mappings are not available to the "
                            "compute service but are required for ports with "
                            "a resource request."))

                binding_profile[constants.ALLOCATION] = (
                    self._get_binding_profile_allocation(
                        context, p, neutron, provider_mappings))
                updates[constants.BINDING_PROFILE] = binding_profile

            port_updates.append((p['id'], updates))

        # Avoid rolling back updates if we catch an error above.
        # TODO(lbeliveau): Batch up the port updates in one neutron call.
        for port_id, updates in port_updates:
            if updates:
                LOG.info("Updating port %(port)s with "
                         "attributes %(attributes)s",
                         {"port": port_id, "attributes": updates},
                         instance=instance)
                try:
                    neutron.update_port(port_id, {'port': updates})
                except Exception:
                    # Log with the failing port id, then re-raise so the
                    # caller sees the original error.
                    with excutils.save_and_reraise_exception():
                        LOG.exception("Unable to update binding details "
                                      "for port %s",
                                      port_id, instance=instance)

3917 

3918 def update_instance_vnic_index(self, context, instance, vif, index): 

3919 """Update instance vnic index. 

3920 

3921 When the 'VNIC index' extension is supported this method will update 

3922 the vnic index of the instance on the port. An instance may have more 

3923 than one vnic. 

3924 

3925 :param context: The request context. 

3926 :param instance: nova.objects.instance.Instance object. 

3927 :param vif: The VIF in question. 

3928 :param index: The index on the instance for the VIF. 

3929 """ 

3930 neutron = get_client(context) 

3931 if self.has_vnic_index_extension(client=neutron): 3931 ↛ exitline 3931 didn't return from function 'update_instance_vnic_index' because the condition on line 3931 was always true

3932 port_req_body = {'port': {'vnic_index': index}} 

3933 try: 

3934 neutron.update_port(vif['id'], port_req_body) 

3935 except Exception: 

3936 with excutils.save_and_reraise_exception(): 

3937 LOG.exception('Unable to update instance VNIC index ' 

3938 'for port %s.', 

3939 vif['id'], instance=instance) 

3940 

3941 def get_segment_ids_for_network( 

3942 self, 

3943 context: nova.context.RequestContext, 

3944 network_id: str, 

3945 ) -> ty.List[str]: 

3946 """Query the segmentation ids for the given network. 

3947 

3948 :param context: The request context. 

3949 :param network_id: The UUID of the network to be queried. 

3950 :returns: The list of segment UUIDs of the network or an empty list if 

3951 either Segment extension isn't enabled in Neutron or if the network 

3952 isn't configured for routing. 

3953 """ 

3954 client = get_client(context, admin=True) 

3955 

3956 if not self.has_segment_extension(client=client): 

3957 return [] 

3958 

3959 try: 

3960 # NOTE(sbauza): We can't use list_segments() directly because the 

3961 # API is borked and returns both segments but also segmentation IDs 

3962 # of a provider network if any. 

3963 subnets = client.list_subnets(network_id=network_id, 

3964 fields='segment_id')['subnets'] 

3965 except neutron_client_exc.NeutronClientException as e: 

3966 raise exception.InvalidRoutedNetworkConfiguration( 

3967 'Failed to get segment IDs for network %s' % network_id) from e 

3968 # The segment field of an unconfigured subnet could be None 

3969 return [subnet['segment_id'] for subnet in subnets 

3970 if subnet.get('segment_id') is not None] 

3971 

3972 def get_segment_id_for_subnet( 

3973 self, 

3974 context: nova.context.RequestContext, 

3975 subnet_id: str, 

3976 ) -> ty.Optional[str]: 

3977 """Query the segmentation id for the given subnet. 

3978 

3979 :param context: The request context. 

3980 :param subnet_id: The UUID of the subnet to be queried. 

3981 :returns: The segment UUID of the subnet or None if either Segment 

3982 extension isn't enabled in Neutron or the provided subnet doesn't 

3983 have segments (if the related network isn't configured for routing) 

3984 """ 

3985 client = get_client(context, admin=True) 

3986 

3987 if not self.has_segment_extension(client=client): 

3988 return None 

3989 

3990 try: 

3991 subnet = client.show_subnet(subnet_id)['subnet'] 

3992 except neutron_client_exc.NeutronClientException as e: 

3993 raise exception.InvalidRoutedNetworkConfiguration( 

3994 'Subnet %s not found' % subnet_id) from e 

3995 return subnet.get('segment_id') 

3996 

3997 

3998def _ensure_requested_network_ordering(accessor, unordered, preferred): 

3999 """Sort a list with respect to the preferred network ordering.""" 

4000 if preferred: 

4001 unordered.sort(key=lambda i: preferred.index(accessor(i)))