Coverage for nova/virt/libvirt/vif.py: 81%

441 statements  

coverage.py v7.6.12, created at 2025-04-17 15:08 +0000

1# Copyright (C) 2011 Midokura KK 

2# Copyright (C) 2011 Nicira, Inc 

3# Copyright 2011 OpenStack Foundation 

4# All Rights Reserved. 

5# Copyright 2016 Red Hat, Inc. 

6# 

7# Licensed under the Apache License, Version 2.0 (the "License"); you may 

8# not use this file except in compliance with the License. You may obtain 

9# a copy of the License at 

10# 

11# http://www.apache.org/licenses/LICENSE-2.0 

12# 

13# Unless required by applicable law or agreed to in writing, software 

14# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 

15# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 

16# License for the specific language governing permissions and limitations 

17# under the License. 

18 

19"""VIF drivers for libvirt.""" 

20 

21import os 

22import typing as ty 

23 

24import os_vif 

25from os_vif import exception as osv_exception 

26from os_vif.objects import fields as osv_fields 

27from os_vif.objects import vif as osv_vifs 

28from oslo_concurrency import processutils 

29from oslo_log import log as logging 

30from oslo_utils import strutils 

31 

32import nova.conf 

33from nova import exception 

34from nova.i18n import _ 

35from nova.network import model as network_model 

36from nova.network import os_vif_util 

37from nova import objects 

38from nova.pci import utils as pci_utils 

39import nova.privsep.linux_net 

40from nova import profiler 

41from nova import utils 

42from nova.virt import hardware 

43from nova.virt.libvirt import config as vconfig 

44from nova.virt.libvirt import designer 

45from nova.virt.libvirt import host as libvirt_host 

46from nova.virt import osinfo 

47 

48 

49LOG = logging.getLogger(__name__) 

50 

51CONF = nova.conf.CONF 

52 

53SUPPORTED_VIF_MODELS = { 

54 'qemu': [ 

55 network_model.VIF_MODEL_VIRTIO, 

56 network_model.VIF_MODEL_NE2K_PCI, 

57 network_model.VIF_MODEL_PCNET, 

58 network_model.VIF_MODEL_RTL8139, 

59 network_model.VIF_MODEL_E1000, 

60 network_model.VIF_MODEL_E1000E, 

61 network_model.VIF_MODEL_LAN9118, 

62 network_model.VIF_MODEL_SPAPR_VLAN, 

63 network_model.VIF_MODEL_VMXNET3, 

64 network_model.VIF_MODEL_IGB, 

65 ], 

66 'kvm': [ 

67 network_model.VIF_MODEL_VIRTIO, 

68 network_model.VIF_MODEL_NE2K_PCI, 

69 network_model.VIF_MODEL_PCNET, 

70 network_model.VIF_MODEL_RTL8139, 

71 network_model.VIF_MODEL_E1000, 

72 network_model.VIF_MODEL_E1000E, 

73 network_model.VIF_MODEL_SPAPR_VLAN, 

74 network_model.VIF_MODEL_VMXNET3, 

75 network_model.VIF_MODEL_IGB, 

76 ], 

77 'lxc': [], 

78 'parallels': [ 

79 network_model.VIF_MODEL_VIRTIO, 

80 network_model.VIF_MODEL_RTL8139, 

81 network_model.VIF_MODEL_E1000, 

82 ], 

83} 

84 

85 

86def is_vif_model_valid_for_virt(virt_type, vif_model): 

87 

88 if vif_model is None: 

89 return True 

90 

91 if virt_type not in SUPPORTED_VIF_MODELS:  # coverage: 91 ↛ 92 (condition on line 91 was never true)

92 raise exception.UnsupportedVirtType(virt=virt_type) 

93 

94 return vif_model in SUPPORTED_VIF_MODELS[virt_type] 
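# For illustration (a sketch, not exhaustive), given the table above:
#   is_vif_model_valid_for_virt('kvm', network_model.VIF_MODEL_VIRTIO)   # True
#   is_vif_model_valid_for_virt('lxc', network_model.VIF_MODEL_E1000)    # False, 'lxc' lists no models
#   is_vif_model_valid_for_virt('foo', None)                             # True, None short-circuits
#   is_vif_model_valid_for_virt('foo', network_model.VIF_MODEL_VIRTIO)   # raises UnsupportedVirtType
# where 'foo' stands in for any virt type not present in SUPPORTED_VIF_MODELS.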

95 

96 

97def set_vf_interface_vlan(pci_addr, mac_addr, vlan=0): 

98 vlan_id = int(vlan) 

99 pf_ifname = pci_utils.get_ifname_by_pci_address(pci_addr, 

100 pf_interface=True) 

101 vf_ifname = pci_utils.get_ifname_by_pci_address(pci_addr) 

102 vf_num = pci_utils.get_vf_num_by_pci_address(pci_addr) 

103 

104 nova.privsep.linux_net.set_device_macaddr_and_vlan( 

105 pf_ifname, vf_num, mac_addr, vlan_id) 

106 

107 # Bring up/down the VF's interface 

108 # TODO(edand): The mac is assigned as a workaround for the following issue 

109 # https://bugzilla.redhat.com/show_bug.cgi?id=1372944 

110 # once resolved it will be removed 

111 port_state = 'up' if vlan_id > 0 else 'down' 

112 nova.privsep.linux_net.set_device_macaddr(vf_ifname, mac_addr, 

113 port_state=port_state) 
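# Roughly speaking, and assuming the usual iproute2-based privsep helpers,
# the two calls above amount to "ip link set <pf_ifname> vf <vf_num>
# mac <mac_addr> vlan <vlan_id>" followed by bringing the VF netdev up or
# down; the commands run inside nova.privsep so the compute process itself
# does not need root.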

114 

115 

116def set_vf_trusted(pci_addr, trusted): 

117 """Configures the VF to be trusted or not 

118 

119 :param pci_addr: PCI slot of the device 

120 :param trusted: Boolean value to indicate whether to 

121 enable/disable 'trusted' capability 

122 """ 

123 pf_ifname = pci_utils.get_ifname_by_pci_address(pci_addr, 

124 pf_interface=True) 

125 vf_num = pci_utils.get_vf_num_by_pci_address(pci_addr) 

126 nova.privsep.linux_net.set_device_trust( 

127 pf_ifname, vf_num, trusted) 
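# A "trusted" VF is one the PF allows to request privileged operations such
# as enabling promiscuous mode or changing its own MAC; Neutron passes the
# flag through the port binding profile, which plug_hw_veb() below reads.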

128 

129 

130@utils.synchronized('lock_vlan', external=True) 

131def ensure_vlan(vlan_num, bridge_interface, mac_address=None, mtu=None, 

132 interface=None): 

133 """Create a vlan unless it already exists.""" 

134 if interface is None: 

135 interface = 'vlan%s' % vlan_num 

136 if not nova.privsep.linux_net.device_exists(interface): 

137 LOG.debug('Starting VLAN interface %s', interface) 

138 nova.privsep.linux_net.add_vlan(bridge_interface, interface, 

139 vlan_num) 

140 # (danwent) the bridge will inherit this address, so we want to 

141 # make sure it is the value set from the NetworkManager 

142 if mac_address: 

143 nova.privsep.linux_net.set_device_macaddr( 

144 interface, mac_address) 

145 nova.privsep.linux_net.set_device_enabled(interface) 

146 # NOTE(vish): set mtu every time to ensure that changes to mtu get 

147 # propagated 

148 nova.privsep.linux_net.set_device_mtu(interface, mtu) 

149 return interface 
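# For illustration: ensure_vlan(100, 'eth1') creates and enables a 'vlan100'
# device on top of 'eth1' if it does not already exist, optionally pins its
# MAC, and (re)applies the MTU on every call so that MTU changes propagate
# even when the device is already present.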

150 

151 

152@profiler.trace_cls("vif_driver") 

153class LibvirtGenericVIFDriver(object): 

154 """Generic VIF driver for libvirt networking.""" 

155 

156 def __init__(self, host: libvirt_host.Host = None): 

157 super().__init__() 

158 self.host = host 

159 

160 def get_vif_devname(self, vif): 

161 if 'devname' in vif:  # coverage: 161 ↛ 163 (condition on line 161 was always true)

162 return vif['devname'] 

163 return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN] 

164 

165 def get_vif_model(self, image_meta=None, vif_model=None): 

166 

167 model = vif_model 

168 

169 # If the user has specified a 'vif_model' against the 

170 # image then honour that model 

171 if image_meta: 

172 model = osinfo.HardwareProperties(image_meta).network_model 

173 

174 # If the virt type is KVM/QEMU/VZ(Parallels), then use virtio according 

175 # to the global config parameter 

176 if (model is None and CONF.libvirt.virt_type in 

177 ('kvm', 'qemu', 'parallels') and 

178 CONF.libvirt.use_virtio_for_bridges): 

179 model = network_model.VIF_MODEL_VIRTIO 

180 

181 return model 

182 

183 def get_base_config( 

184 self, instance, mac, image_meta, flavor, virt_type, vnic_type, 

185 ): 

186 # TODO(sahid): We should rewrite it. This method handles too

187 # many unrelated things. We probably need specific virtio,

188 # vhost and vhostuser functions.

189 

190 conf = vconfig.LibvirtConfigGuestInterface() 

191 # Default to letting libvirt / the hypervisor choose the model 

192 model = None 

193 driver = None 

194 vhost_queues = None 

195 rx_queue_size = None 

196 

197 packed = self._get_packed_virtqueue_settings( 

198 image_meta, flavor) 

199 # NOTE(stephenfin): Skip most things here as only apply to virtio 

200 # devices 

201 if vnic_type in network_model.VNIC_TYPES_DIRECT_PASSTHROUGH: 

202 designer.set_vif_guest_frontend_config( 

203 conf, mac, model, driver, vhost_queues, rx_queue_size, packed) 

204 return conf 

205 

206 rx_queue_size = CONF.libvirt.rx_queue_size 

207 

208 # if model has already been defined, 

209 # image_meta contents will override it 

210 model = self.get_vif_model(image_meta=image_meta, vif_model=model) 

211 

212 if not is_vif_model_valid_for_virt(virt_type, model):  # coverage: 212 ↛ 213 (condition on line 212 was never true)

213 raise exception.UnsupportedHardware(model=model, virt=virt_type) 

214 

215 # The rest of this only applies to virtio 

216 if model != network_model.VIF_MODEL_VIRTIO: 

217 designer.set_vif_guest_frontend_config( 

218 conf, mac, model, driver, vhost_queues, rx_queue_size, packed) 

219 return conf 

220 

221 # Workaround libvirt bug, where it mistakenly enables vhost mode, even 

222 # for non-KVM guests 

223 if virt_type == 'qemu': 

224 driver = 'qemu' 

225 

226 if virt_type in ('kvm', 'parallels'): 

227 vhost_drv, vhost_queues = self._get_virtio_mq_settings( 

228 image_meta, flavor) 

229 # TODO(sahid): It seems that we return driver 'vhost' even

230 # for vhostuser interfaces, where the driver should

231 # actually be 'vhost-user'. That currently does

232 # not create any issue since QEMU ignores the driver

233 # argument for vhostuser interfaces but we should probably

234 # fix that anyway. Also we should enforce that the driver

235 # is 'vhost' and not None.

236 driver = vhost_drv or driver 

237 

238 if driver == 'vhost' or driver is None: 

239 # vhost backend only supports update of RX queue size 

240 if rx_queue_size: 

241 # TODO(sahid): Specifically force driver to be vhost

242 # because if it is None we don't generate the XML

243 # driver element needed to set the queue size

244 # attribute. This can be removed when get_base_config

245 # is fixed and rewritten to set the correct

246 # backend.

247 driver = 'vhost' 

248 

249 designer.set_vif_guest_frontend_config( 

250 conf, mac, model, driver, vhost_queues, rx_queue_size, packed) 

251 

252 return conf 
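# To make the flow above concrete (a sketch under typical settings): with
# virt_type='kvm', a virtio model and a flavor or image requesting
# multiqueue, _get_virtio_mq_settings() yields ('vhost', N) with N capped by
# _get_max_tap_queues(), and the frontend is rendered with driver='vhost',
# that queue count, the configured rx_queue_size and the packed-virtqueue
# flag. Direct-passthrough VNIC types skip all of this and only get the MAC
# (plus the packed flag) set.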

253 

254 def get_base_hostdev_pci_config(self, vif): 

255 conf = vconfig.LibvirtConfigGuestHostdevPCI() 

256 pci_slot = vif['profile']['pci_slot'] 

257 designer.set_vif_host_backend_hostdev_pci_config(conf, pci_slot) 

258 return conf 

259 

260 def _get_virtio_mq_settings(self, image_meta, flavor): 

261 """A methods to set the number of virtio queues, 

262 if it has been requested in extra specs. 

263 """ 

264 if not isinstance(image_meta, objects.ImageMeta): 

265 image_meta = objects.ImageMeta.from_dict(image_meta) 

266 

267 driver = None 

268 vhost_queues = None 

269 if hardware.get_vif_multiqueue_constraint(flavor, image_meta): 

270 driver = 'vhost' 

271 max_tap_queues = self._get_max_tap_queues() 

272 if max_tap_queues: 

273 vhost_queues = (max_tap_queues if flavor.vcpus > max_tap_queues 

274 else flavor.vcpus) 

275 else: 

276 vhost_queues = flavor.vcpus 

277 

278 return (driver, vhost_queues) 

279 

280 def _get_max_tap_queues(self): 

281 # Note(sean-k-mooney): some linux distros have backported 

282 # changes for newer kernels which make the kernel version 

283 # number unreliable to determine the max queues supported.

284 # To address this without making the code distro dependent,

285 # we introduce a new config option and prefer it if set. 

286 if CONF.libvirt.max_queues: 

287 return CONF.libvirt.max_queues 

288 # NOTE(kengo.sakai): In kernels prior to 3.0, 

289 # multiple queues on a tap interface is not supported. 

290 # In kernels 3.x, the number of queues on a tap interface 

291 # is limited to 8. From 4.0, the number is 256. 

292 # See: https://bugs.launchpad.net/nova/+bug/1570631 

293 kernel_version = int(os.uname().release.split(".")[0]) 

294 if kernel_version <= 2: 

295 return 1 

296 elif kernel_version == 3: 

297 return 8 

298 elif kernel_version == 4: 

299 return 256 

300 else: 

301 return None 
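# For example, on a 5.x or newer kernel with [libvirt]/max_queues unset this
# returns None, so _get_virtio_mq_settings() above falls back to using
# flavor.vcpus as the vhost queue count.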

302 

303 def _get_packed_virtqueue_settings(self, image_meta, flavor): 

304 """A method to check if Virtio Packed Ring was requested.""" 

305 if not isinstance(image_meta, objects.ImageMeta): 

306 image_meta = objects.ImageMeta.from_dict(image_meta) 

307 

308 return hardware.get_packed_virtqueue_constraint(flavor, image_meta) 

309 

310 def get_bridge_name(self, vif): 

311 return vif['network']['bridge'] 

312 

313 def get_veth_pair_names(self, iface_id): 

314 return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN], 

315 ("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN]) 

316 

317 def get_config_802qbg(self, instance, vif, image_meta, flavor, virt_type): 

318 conf = self.get_base_config( 

319 instance, vif['address'], image_meta, flavor, virt_type, 

320 vif['vnic_type']) 

321 

322 params = vif["qbg_params"] 

323 designer.set_vif_host_backend_802qbg_config( 

324 conf, vif['network'].get_meta('interface'), 

325 params['managerid'], 

326 params['typeid'], 

327 params['typeidversion'], 

328 params['instanceid']) 

329 

330 designer.set_vif_bandwidth_config(conf, flavor) 

331 

332 return conf 

333 

334 def get_config_802qbh(self, instance, vif, image_meta, flavor, virt_type): 

335 conf = self.get_base_config( 

336 instance, vif['address'], image_meta, flavor, virt_type, 

337 vif['vnic_type']) 

338 

339 profile = vif["profile"] 

340 vif_details = vif["details"] 

341 net_type = 'direct' 

342 if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:  # coverage: 342 ↛ 345 (condition on line 342 was always true)

343 net_type = 'hostdev' 

344 

345 designer.set_vif_host_backend_802qbh_config( 

346 conf, net_type, profile['pci_slot'], 

347 vif_details[network_model.VIF_DETAILS_PROFILEID]) 

348 

349 designer.set_vif_bandwidth_config(conf, flavor) 

350 

351 return conf 

352 

353 def get_config_hw_veb(self, instance, vif, image_meta, flavor, virt_type): 

354 conf = self.get_base_config( 

355 instance, vif['address'], image_meta, flavor, virt_type, 

356 vif['vnic_type']) 

357 

358 profile = vif["profile"] 

359 vif_details = vif["details"] 

360 net_type = 'direct' 

361 if vif['vnic_type'] in [network_model.VNIC_TYPE_DIRECT, 

362 network_model.VNIC_TYPE_ACCELERATOR_DIRECT]: 

363 net_type = 'hostdev' 

364 

365 designer.set_vif_host_backend_hw_veb( 

366 conf, net_type, profile['pci_slot'], 

367 vif_details[network_model.VIF_DETAILS_VLAN]) 

368 

369 designer.set_vif_bandwidth_config(conf, flavor) 

370 

371 return conf 

372 

373 def get_config_hostdev_physical( 

374 self, instance, vif, image_meta, flavor, virt_type, 

375 ): 

376 return self.get_base_hostdev_pci_config(vif) 

377 

378 def get_config_macvtap(self, instance, vif, image_meta, flavor, virt_type): 

379 conf = self.get_base_config( 

380 instance, vif['address'], image_meta, flavor, virt_type, 

381 vif['vnic_type']) 

382 

383 vif_details = vif['details'] 

384 macvtap_src = vif_details.get(network_model.VIF_DETAILS_MACVTAP_SOURCE) 

385 macvtap_mode = vif_details.get(network_model.VIF_DETAILS_MACVTAP_MODE) 

386 phys_interface = vif_details.get( 

387 network_model.VIF_DETAILS_PHYS_INTERFACE) 

388 

389 missing_params = [] 

390 if macvtap_src is None: 

391 missing_params.append(network_model.VIF_DETAILS_MACVTAP_SOURCE) 

392 if macvtap_mode is None: 

393 missing_params.append(network_model.VIF_DETAILS_MACVTAP_MODE) 

394 if phys_interface is None: 

395 missing_params.append(network_model.VIF_DETAILS_PHYS_INTERFACE) 

396 

397 if len(missing_params) > 0: 

398 raise exception.VifDetailsMissingMacvtapParameters( 

399 vif_id=vif['id'], 

400 missing_params=missing_params) 

401 

402 designer.set_vif_host_backend_direct_config( 

403 conf, macvtap_src, macvtap_mode) 

404 

405 designer.set_vif_bandwidth_config(conf, flavor) 

406 

407 return conf 

408 

409 def get_config_iovisor(self, instance, vif, image_meta, flavor, virt_type): 

410 conf = self.get_base_config( 

411 instance, vif['address'], image_meta, flavor, virt_type, 

412 vif['vnic_type']) 

413 

414 dev = self.get_vif_devname(vif) 

415 designer.set_vif_host_backend_ethernet_config(conf, dev) 

416 

417 designer.set_vif_bandwidth_config(conf, flavor) 

418 

419 return conf 

420 

421 def get_config_midonet(self, instance, vif, image_meta, flavor, virt_type): 

422 conf = self.get_base_config( 

423 instance, vif['address'], image_meta, flavor, virt_type, 

424 vif['vnic_type']) 

425 

426 dev = self.get_vif_devname(vif) 

427 designer.set_vif_host_backend_ethernet_config(conf, dev) 

428 

429 return conf 

430 

431 def get_config_tap(self, instance, vif, image_meta, flavor, virt_type): 

432 conf = self.get_base_config( 

433 instance, vif['address'], image_meta, flavor, virt_type, 

434 vif['vnic_type']) 

435 

436 dev = self.get_vif_devname(vif) 

437 designer.set_vif_host_backend_ethernet_config(conf, dev) 

438 

439 network = vif.get('network') 

440 if network and network.get_meta('mtu'):  # coverage: 440 ↛ 441 (condition on line 440 was never true)

441 designer.set_vif_mtu_config(conf, network.get_meta("mtu")) 

442 

443 return conf 

444 

445 def get_config_ib_hostdev( 

446 self, instance, vif, image_meta, flavor, virt_type, 

447 ): 

448 return self.get_base_hostdev_pci_config(vif) 

449 

450 def _set_config_VIFGeneric(self, instance, vif, conf): 

451 dev = vif.vif_name 

452 designer.set_vif_host_backend_ethernet_config(conf, dev) 

453 

454 def _set_config_VIFBridge(self, instance, vif, conf): 

455 conf.net_type = "bridge" 

456 conf.source_dev = vif.bridge_name 

457 conf.target_dev = vif.vif_name 

458 

459 def _set_config_VIFOpenVSwitch(self, instance, vif, conf): 

460 # if delegating creation to os-vif, create an ethernet-type VIF and let 

461 # os-vif do the actual wiring up 

462 if 'create_port' in vif.port_profile and vif.port_profile.create_port: 

463 self._set_config_VIFGeneric(instance, vif, conf) 

464 else: 

465 conf.net_type = "bridge" 

466 conf.source_dev = vif.bridge_name 

467 conf.target_dev = vif.vif_name 

468 self._set_config_VIFPortProfile(instance, vif, conf) 

469 

470 def _set_config_VIFVHostUser(self, instance, vif, conf): 

471 # TODO(sahid): We should never configure a driver backend for

472 # vhostuser interfaces. Specifically override driver to use

473 # None. This can be removed when get_base_config is fixed

474 # and rewritten to set the correct backend.

475 conf.driver_name = None 

476 

477 designer.set_vif_host_backend_vhostuser_config( 

478 conf, vif.mode, vif.path, CONF.libvirt.rx_queue_size, 

479 CONF.libvirt.tx_queue_size, vif.vif_name) 

480 

481 def _set_config_VIFHostDevice(self, instance, vif, conf): 

482 if vif.dev_type == osv_fields.VIFHostDeviceDevType.ETHERNET: 

483 # This sets the required fields for an <interface type='hostdev'> 

484 # section in a libvirt domain (by using a subset of hw_veb's 

485 # options). 

486 designer.set_vif_host_backend_hw_veb( 

487 conf, 'hostdev', vif.dev_address, None) 

488 else: 

489 # TODO(jangutter): dev_type == VIFHostDeviceDevType.GENERIC 

490 # is currently unsupported under os-vif. The corresponding conf 

491 # class would be: LibvirtConfigGuestHostdevPCI 

492 # but os-vif only returns a LibvirtConfigGuestInterface object 

493 raise exception.InternalError( 

494 _("Unsupported os-vif VIFHostDevice dev_type %(type)s") % 

495 {'type': vif.dev_type}) 

496 

497 def _set_config_VIFPortProfileOpenVSwitch(self, profile, conf): 

498 conf.vporttype = "openvswitch" 

499 conf.add_vport_param("interfaceid", 

500 profile.interface_id) 

501 

502 def _set_config_VIFPortProfile(self, instance, vif, conf): 

503 # Set any port profile that may be required 

504 profile_name = vif.port_profile.obj_name() 

505 if profile_name == 'VIFPortProfileOpenVSwitch':  # coverage: 505 ↛ 508 (condition on line 505 was always true)

506 self._set_config_VIFPortProfileOpenVSwitch(vif.port_profile, conf) 

507 else: 

508 raise exception.InternalError( 

509 _('Unsupported VIF port profile type %s') % profile_name) 

510 

511 def _get_vdpa_dev_path(self, pci_address: ty.Text) -> ty.Text: 

512 if self.host is not None: 

513 return self.host.get_vdpa_device_path(pci_address) 

514 # TODO(sean-k-mooney) this should never be raised remove when host 

515 # is not optional in __init__. 

516 raise TypeError("self.host must be set to use this function.")

517 

518 def _get_config_os_vif( 

519 self, instance, vif, image_meta, flavor, virt_type, vnic_type, 

520 ): 

521 """Get the domain config for a VIF 

522 

523 :param instance: nova.objects.Instance 

524 :param vif: os_vif.objects.vif.VIFBase subclass 

525 :param image_meta: nova.objects.ImageMeta 

526 :param flavor: nova.objects.Flavor 

527 :param virt_type: virtualization type 

528 :param vnic_type: vnic type 

529 

530 :returns: nova.virt.libvirt.config.LibvirtConfigGuestInterface 

531 """ 

532 

533 # Do the config that's common to all vif types 

534 conf = self.get_base_config( 

535 instance, vif.address, image_meta, flavor, virt_type, vnic_type) 

536 

537 # Do the VIF type specific config 

538 if isinstance(vif, osv_vifs.VIFGeneric): 

539 self._set_config_VIFGeneric(instance, vif, conf) 

540 elif isinstance(vif, osv_vifs.VIFBridge): 

541 self._set_config_VIFBridge(instance, vif, conf) 

542 elif isinstance(vif, osv_vifs.VIFOpenVSwitch): 

543 self._set_config_VIFOpenVSwitch(instance, vif, conf) 

544 elif isinstance(vif, osv_vifs.VIFVHostUser): 

545 self._set_config_VIFVHostUser(instance, vif, conf) 

546 elif isinstance(vif, osv_vifs.VIFHostDevice):  # coverage: 546 ↛ 555 (condition on line 546 was always true)

547 if vnic_type != network_model.VNIC_TYPE_VDPA:  # coverage: 547 ↛ 550 (condition on line 547 was always true)

548 self._set_config_VIFHostDevice(instance, vif, conf) 

549 else: 

550 dev_path = self._get_vdpa_dev_path(vif.dev_address) 

551 designer.set_vif_host_backend_vdpa_config( 

552 conf, dev_path, CONF.libvirt.rx_queue_size, 

553 CONF.libvirt.tx_queue_size) 

554 else: 

555 raise exception.InternalError( 

556 _("Unsupported VIF type %s") % vif.obj_name()) 

557 

558 # not all VIF types support bandwidth configuration 

559 # https://github.com/libvirt/libvirt/blob/568a41722/src/conf/netdev_bandwidth_conf.h#L38 

560 if vif.obj_name() not in ('VIFVHostUser', 'VIFHostDevice'): 

561 designer.set_vif_bandwidth_config(conf, flavor) 

562 

563 if 'network' in vif and 'mtu' in vif.network:  # coverage: 563 ↛ 566 (condition on line 563 was always true)

564 designer.set_vif_mtu_config(conf, vif.network.mtu) 

565 

566 return conf 
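# In short, the dispatch above follows the os-vif object model:
# os_vif_util.nova_to_osvif_vif() converts Nova's network_model.VIF into a
# VIFGeneric/VIFBridge/VIFOpenVSwitch/VIFVHostUser/VIFHostDevice object and
# each branch only fills in the libvirt-specific backend details; VIF types
# os-vif cannot represent fall back to the legacy get_config_* methods in
# get_config() below.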

567 

568 def get_config(self, instance, vif, image_meta, flavor, virt_type): 

569 vif_type = vif['type'] 

570 vnic_type = vif['vnic_type'] 

571 

572 # instance.display_name could be unicode 

573 instance_repr = utils.get_obj_repr_unicode(instance) 

574 LOG.debug('vif_type=%(vif_type)s instance=%(instance)s ' 

575 'vif=%(vif)s virt_type=%(virt_type)s', 

576 {'vif_type': vif_type, 'instance': instance_repr, 

577 'vif': vif, 'virt_type': virt_type}) 

578 

579 if vif_type is None: 

580 raise exception.InternalError( 

581 _("vif_type parameter must be present " 

582 "for this vif_driver implementation")) 

583 

584 # Try os-vif codepath first 

585 vif_obj = os_vif_util.nova_to_osvif_vif(vif) 

586 if vif_obj is not None: 

587 return self._get_config_os_vif( 

588 instance, vif_obj, image_meta, flavor, virt_type, vnic_type) 

589 

590 # Legacy non-os-vif codepath 

591 args = (instance, vif, image_meta, flavor, virt_type) 

592 if vif_type == network_model.VIF_TYPE_IOVISOR: 

593 return self.get_config_iovisor(*args) 

594 elif vif_type == network_model.VIF_TYPE_802_QBG: 

595 return self.get_config_802qbg(*args) 

596 elif vif_type == network_model.VIF_TYPE_802_QBH: 

597 return self.get_config_802qbh(*args) 

598 elif vif_type == network_model.VIF_TYPE_HW_VEB: 

599 return self.get_config_hw_veb(*args) 

600 elif vif_type == network_model.VIF_TYPE_HOSTDEV: 

601 return self.get_config_hostdev_physical(*args) 

602 elif vif_type == network_model.VIF_TYPE_MACVTAP: 

603 return self.get_config_macvtap(*args) 

604 elif vif_type == network_model.VIF_TYPE_MIDONET: 

605 return self.get_config_midonet(*args) 

606 elif vif_type == network_model.VIF_TYPE_TAP: 

607 return self.get_config_tap(*args) 

608 elif vif_type == network_model.VIF_TYPE_IB_HOSTDEV:  # coverage: 608 ↛ 611 (condition on line 608 was always true)

609 return self.get_config_ib_hostdev(*args) 

610 

611 raise exception.InternalError(_('Unexpected vif_type=%s') % vif_type) 

612 

613 def plug_ib_hostdev(self, instance, vif): 

614 fabric = vif.get_physical_network() 

615 if not fabric: 

616 raise exception.NetworkMissingPhysicalNetwork( 

617 network_uuid=vif['network']['id'] 

618 ) 

619 pci_slot = vif['profile']['pci_slot'] 

620 device_id = instance['uuid'] 

621 vnic_mac = vif['address'] 

622 try: 

623 nova.privsep.libvirt.plug_infiniband_vif( 

624 vnic_mac, device_id, fabric, 

625 network_model.VIF_TYPE_IB_HOSTDEV, pci_slot) 

626 except processutils.ProcessExecutionError: 

627 LOG.exception("Failed while plugging ib hostdev vif", 

628 instance=instance) 

629 

630 def plug_hw_veb(self, instance, vif): 

631 # TODO(adrianc): The piece of code for MACVTAP can be removed once: 

632 # 1. neutron SR-IOV agent does not rely on the administrative mac 

633 # as depicted in https://bugs.launchpad.net/neutron/+bug/1841067 

634 # 2. libvirt driver does not change mac address for macvtap VNICs 

635 # or Alternatively does not rely on recreating libvirt's nodev 

636 # name from the current mac address set on the netdevice. 

637 # See: virt.libvirt.driver.LibvirtDriver._get_pcinet_info 

638 if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP: 

639 set_vf_interface_vlan( 

640 vif['profile']['pci_slot'], 

641 mac_addr=vif['address'], 

642 vlan=vif['details'][network_model.VIF_DETAILS_VLAN]) 

643 

644 elif vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:  # coverage: 644 ↛ exit (condition on line 644 was always true; never fell through to the function exit)

645 trusted = strutils.bool_from_string( 

646 vif['profile'].get('trusted', "False")) 

647 if trusted:  # coverage: 647 ↛ exit (condition on line 647 was always true; never fell through to the function exit)

648 set_vf_trusted(vif['profile']['pci_slot'], True) 

649 

650 def plug_macvtap(self, instance, vif): 

651 vif_details = vif['details'] 

652 vlan = vif_details.get(network_model.VIF_DETAILS_VLAN) 

653 if vlan: 

654 vlan_name = vif_details.get( 

655 network_model.VIF_DETAILS_MACVTAP_SOURCE) 

656 phys_if = vif_details.get(network_model.VIF_DETAILS_PHYS_INTERFACE) 

657 ensure_vlan(vlan, phys_if, interface=vlan_name) 

658 

659 def plug_midonet(self, instance, vif): 

660 """Plug into MidoNet's network port 

661 

662 Bind the vif to a MidoNet virtual port. 

663 """ 

664 dev = self.get_vif_devname(vif) 

665 port_id = vif['id'] 

666 try: 

667 nova.privsep.linux_net.create_tap_dev(dev) 

668 nova.privsep.libvirt.plug_midonet_vif(port_id, dev) 

669 except processutils.ProcessExecutionError: 

670 LOG.exception("Failed while plugging vif", instance=instance) 

671 

672 def plug_iovisor(self, instance, vif): 

673 """Plug using PLUMgrid IO Visor Driver 

674 

675 Connect a network device to its respective

676 Virtual Domain in the PLUMgrid Platform.

677 """ 

678 dev = self.get_vif_devname(vif) 

679 iface_id = vif['id'] 

680 nova.privsep.linux_net.create_tap_dev(dev) 

681 net_id = vif['network']['id'] 

682 tenant_id = instance.project_id 

683 try: 

684 nova.privsep.libvirt.plug_plumgrid_vif( 

685 dev, iface_id, vif['address'], net_id, tenant_id) 

686 except processutils.ProcessExecutionError: 

687 LOG.exception("Failed while plugging vif", instance=instance) 

688 

689 def plug_tap(self, instance, vif): 

690 """Plug a VIF_TYPE_TAP virtual interface.""" 

691 dev = self.get_vif_devname(vif) 

692 mac = vif['details'].get(network_model.VIF_DETAILS_TAP_MAC_ADDRESS) 

693 image_meta = instance.image_meta 

694 vif_model = self.get_vif_model(image_meta=image_meta) 

695 # TODO(ganso): explore whether multiqueue works for other vif models 

696 # that go through this code path. 

697 multiqueue = False 

698 if vif_model == network_model.VIF_MODEL_VIRTIO: 

699 multiqueue = hardware.get_vif_multiqueue_constraint( 

700 instance.flavor, image_meta) 

701 nova.privsep.linux_net.create_tap_dev(dev, mac, multiqueue=multiqueue) 

702 network = vif.get('network') 

703 mtu = network.get_meta('mtu') if network else None 

704 nova.privsep.linux_net.set_device_mtu(dev, mtu) 

705 

706 def _plug_os_vif(self, instance, vif): 

707 instance_info = os_vif_util.nova_to_osvif_instance(instance) 

708 

709 try: 

710 os_vif.plug(vif, instance_info) 

711 except osv_exception.ExceptionBase as ex: 

712 msg = (_("Failure running os_vif plugin plug method: %(ex)s") 

713 % {'ex': ex}) 

714 raise exception.InternalError(msg) 

715 

716 def plug(self, instance, vif): 

717 vif_type = vif['type'] 

718 

719 # instance.display_name could be unicode 

720 instance_repr = utils.get_obj_repr_unicode(instance) 

721 LOG.debug('vif_type=%(vif_type)s instance=%(instance)s ' 

722 'vif=%(vif)s', 

723 {'vif_type': vif_type, 'instance': instance_repr, 

724 'vif': vif}) 

725 

726 if vif_type is None:  # coverage: 726 ↛ 727 (condition on line 726 was never true)

727 raise exception.VirtualInterfacePlugException( 

728 _("vif_type parameter must be present " 

729 "for this vif_driver implementation")) 

730 

731 # Try os-vif codepath first 

732 vif_obj = os_vif_util.nova_to_osvif_vif(vif) 

733 if vif_obj is not None: 

734 self._plug_os_vif(instance, vif_obj) 

735 return 

736 

737 # Legacy non-os-vif codepath 

738 if vif_type == network_model.VIF_TYPE_IB_HOSTDEV:  # coverage: 738 ↛ 739 (condition on line 738 was never true)

739 self.plug_ib_hostdev(instance, vif) 

740 elif vif_type == network_model.VIF_TYPE_HW_VEB: 

741 self.plug_hw_veb(instance, vif) 

742 elif vif_type == network_model.VIF_TYPE_MACVTAP: 

743 self.plug_macvtap(instance, vif) 

744 elif vif_type == network_model.VIF_TYPE_MIDONET:  # coverage: 744 ↛ 745 (condition on line 744 was never true)

745 self.plug_midonet(instance, vif) 

746 elif vif_type == network_model.VIF_TYPE_IOVISOR: 

747 self.plug_iovisor(instance, vif) 

748 elif vif_type == network_model.VIF_TYPE_TAP:  # coverage: 748 ↛ 750 (condition on line 748 was always true)

749 self.plug_tap(instance, vif) 

750 elif vif_type in {network_model.VIF_TYPE_802_QBG, 

751 network_model.VIF_TYPE_802_QBH, 

752 network_model.VIF_TYPE_HOSTDEV}: 

753 # These are no-ops 

754 pass 

755 else: 

756 raise exception.VirtualInterfacePlugException( 

757 _("Plug VIF failed because of unexpected " 

758 "vif_type=%s") % vif_type) 

759 

760 def unplug_ib_hostdev(self, instance, vif): 

761 fabric = vif.get_physical_network() 

762 if not fabric: 

763 raise exception.NetworkMissingPhysicalNetwork( 

764 network_uuid=vif['network']['id'] 

765 ) 

766 vnic_mac = vif['address'] 

767 try: 

768 nova.privsep.libvirt.unplug_infiniband_vif(fabric, vnic_mac) 

769 except Exception: 

770 LOG.exception("Failed while unplugging ib hostdev vif") 

771 

772 def unplug_hw_veb(self, instance, vif): 

773 # TODO(sean-k-mooney): remove in Train after backporting 0 mac 

774 # change as this should no longer be needed with libvirt >= 3.2.0. 

775 if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP: 

776 # NOTE(sean-k-mooney): Retaining the vm mac on the vf 

777 # after unplugging the vif prevents the PF from transmitting 

778 # a packet with that destination address. This would create a

779 # network partition in the event a vm is migrated or the neutron

780 # port is reused for another vm before the VF is reused. 

781 # The ip utility accepts the MAC 00:00:00:00:00:00 which can 

782 # be used to reset the VF mac when no longer in use by a vm. 

783 # As such we hardcode the 00:00:00:00:00:00 mac. 

784 set_vf_interface_vlan(vif['profile']['pci_slot'], 

785 mac_addr='00:00:00:00:00:00') 

786 elif vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:  # coverage: 786 ↛ exit (condition on line 786 was always true; never fell through to the function exit)

787 if "trusted" in vif['profile']: 787 ↛ exitline 787 didn't return from function 'unplug_hw_veb' because the condition on line 787 was always true

788 set_vf_trusted(vif['profile']['pci_slot'], False) 

789 

790 def unplug_midonet(self, instance, vif): 

791 """Unplug from MidoNet network port 

792 

793 Unbind the vif from a MidoNet virtual port. 

794 """ 

795 dev = self.get_vif_devname(vif) 

796 port_id = vif['id'] 

797 try: 

798 nova.privsep.libvirt.unplug_midonet_vif(port_id) 

799 nova.privsep.linux_net.delete_net_dev(dev) 

800 except processutils.ProcessExecutionError: 

801 LOG.exception("Failed while unplugging vif", instance=instance) 

802 

803 def unplug_tap(self, instance, vif): 

804 """Unplug a VIF_TYPE_TAP virtual interface.""" 

805 dev = self.get_vif_devname(vif) 

806 try: 

807 nova.privsep.linux_net.delete_net_dev(dev) 

808 except processutils.ProcessExecutionError: 

809 LOG.exception("Failed while unplugging vif", instance=instance) 

810 

811 def unplug_iovisor(self, instance, vif): 

812 """Unplug using PLUMgrid IO Visor Driver 

813 

814 Delete the network device and its respective

815 connection to the Virtual Domain in the PLUMgrid Platform.

816 """ 

817 dev = self.get_vif_devname(vif) 

818 try: 

819 nova.privsep.libvirt.unplug_plumgrid_vif(dev) 

820 nova.privsep.linux_net.delete_net_dev(dev) 

821 except processutils.ProcessExecutionError: 

822 LOG.exception("Failed while unplugging vif", instance=instance) 

823 

824 def _unplug_os_vif(self, instance, vif): 

825 instance_info = os_vif_util.nova_to_osvif_instance(instance) 

826 

827 try: 

828 os_vif.unplug(vif, instance_info) 

829 except osv_exception.ExceptionBase as ex: 

830 msg = (_("Failure running os_vif plugin unplug method: %(ex)s") 

831 % {'ex': ex}) 

832 raise exception.InternalError(msg) 

833 

834 def unplug(self, instance, vif): 

835 vif_type = vif['type'] 

836 

837 # instance.display_name could be unicode 

838 instance_repr = utils.get_obj_repr_unicode(instance) 

839 LOG.debug('vif_type=%(vif_type)s instance=%(instance)s ' 

840 'vif=%(vif)s', 

841 {'vif_type': vif_type, 'instance': instance_repr, 

842 'vif': vif}) 

843 

844 if vif_type is None:  # coverage: 844 ↛ 845 (condition on line 844 was never true)

845 msg = _("vif_type parameter must be present for this vif_driver " 

846 "implementation") 

847 raise exception.InternalError(msg) 

848 

849 # Try os-vif codepath first 

850 vif_obj = os_vif_util.nova_to_osvif_vif(vif) 

851 if vif_obj is not None: 

852 self._unplug_os_vif(instance, vif_obj) 

853 return 

854 

855 # Legacy non-os-vif codepath 

856 if vif_type == network_model.VIF_TYPE_IB_HOSTDEV:  # coverage: 856 ↛ 857 (condition on line 856 was never true)

857 self.unplug_ib_hostdev(instance, vif) 

858 elif vif_type == network_model.VIF_TYPE_HW_VEB: 

859 self.unplug_hw_veb(instance, vif) 

860 elif vif_type == network_model.VIF_TYPE_MIDONET:  # coverage: 860 ↛ 861 (condition on line 860 was never true)

861 self.unplug_midonet(instance, vif) 

862 elif vif_type == network_model.VIF_TYPE_IOVISOR: 

863 self.unplug_iovisor(instance, vif) 

864 elif vif_type == network_model.VIF_TYPE_TAP:  # coverage: 864 ↛ 866 (condition on line 864 was always true)

865 self.unplug_tap(instance, vif) 

866 elif vif_type in {network_model.VIF_TYPE_802_QBG, 

867 network_model.VIF_TYPE_802_QBH, 

868 network_model.VIF_TYPE_HOSTDEV, 

869 network_model.VIF_TYPE_MACVTAP}: 

870 # These are no-ops 

871 pass 

872 else: 

873 # TODO(stephenfin): This should probably raise 

874 # VirtualInterfaceUnplugException 

875 raise exception.InternalError( 

876 _("Unplug VIF failed because of unexpected " 

877 "vif_type=%s") % vif_type)