Coverage for nova/virt/zvm/driver.py: 0%

216 statements  

coverage.py v7.6.12, created at 2025-04-17 15:08 +0000

# Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import eventlet
import os
import time

import os_resource_classes as orc
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils

from nova.compute import task_states
from nova import conf
from nova import exception
from nova.i18n import _
from nova.image import glance
from nova.objects import fields as obj_fields
from nova.virt import driver
from nova.virt import images
from nova.virt.zvm import guest
from nova.virt.zvm import hypervisor
from nova.virt.zvm import utils as zvmutils


LOG = logging.getLogger(__name__)
CONF = conf.CONF


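# Fallback filesystem for ephemeral disks when
# CONF.default_ephemeral_format is not set (used by _set_disk_list below).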

DEFAULT_EPH_DISK_FMT = 'ext3'



class ZVMDriver(driver.ComputeDriver):
    """z/VM implementation of ComputeDriver."""
    capabilities = {
        "supports_pcpus": False,
        "supports_remote_managed_ports": False,
        "supports_address_space_passthrough": False,
        "supports_address_space_emulated": False,
        "supports_stateless_firmware": False,
        "supports_virtio_fs": False,
        "supports_mem_backing_file": False,

        # Image type support flags
        "supports_image_type_aki": False,
        "supports_image_type_ami": False,
        "supports_image_type_ari": False,
        "supports_image_type_iso": False,
        "supports_image_type_qcow2": False,
        "supports_image_type_raw": True,
        "supports_image_type_vdi": False,
        "supports_image_type_vhd": False,
        "supports_image_type_vhdx": False,
        "supports_image_type_vmdk": False,
        "supports_image_type_ploop": False,
    }
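    # NOTE: among the image-type flags above, only raw is enabled; this
    # matches the raw disk_format the driver produces when snapshotting.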


    def __init__(self, virtapi):
        super(ZVMDriver, self).__init__(virtapi)

        self._validate_options()

        self._hypervisor = hypervisor.Hypervisor(
            CONF.zvm.cloud_connector_url, ca_file=CONF.zvm.ca_file)

        LOG.info("The zVM compute driver has been initialized.")


    @staticmethod
    def _validate_options():
        if not CONF.zvm.cloud_connector_url:
            error = _('Must specify cloud_connector_url in zvm config '
                      'group to use compute_driver=zvm.driver.ZVMDriver')
            raise exception.ZVMDriverException(error=error)

        # Render the template once to ensure the generated guest name
        # is usable
        try:
            _test_instance = CONF.instance_name_template % 0
        except Exception:
            msg = _("Template is not usable, the template defined is "
                    "instance_name_template=%s") % CONF.instance_name_template
            raise exception.ZVMDriverException(error=msg)

        # For z/VM instances, limit the maximum length of the instance
        # name to 8 characters
        if len(_test_instance) > 8:
            msg = _("Can't spawn instances with template '%s': the z/VM "
                    "hypervisor does not support instance names longer "
                    "than 8 characters. Please change your config of "
                    "instance_name_template.") % CONF.instance_name_template
            raise exception.ZVMDriverException(error=msg)

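    # Example (illustrative): nova's default template 'instance-%08x'
    # renders instance 0 as 'instance-00000000' (17 characters) and fails
    # the length check above, while a template such as 'zvm%05x' yields
    # 'zvm00000' (8 characters) and passes.
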

    def init_host(self, host):
        pass

    def list_instances(self):
        return self._hypervisor.list_names()

    def instance_exists(self, instance):
        # z/VM driver returns name in upper case and because userid is
        # stored instead of uuid, list_instance_uuids is not implemented
        return self._hypervisor.guest_exists(instance)


    def get_available_resource(self, nodename=None):
        host_stats = self._hypervisor.get_available_resource()

        hypervisor_hostname = self._hypervisor.get_available_nodes()[0]
        res = {
            'vcpus': host_stats.get('vcpus', 0),
            'memory_mb': host_stats.get('memory_mb', 0),
            'local_gb': host_stats.get('disk_total', 0),
            'vcpus_used': host_stats.get('vcpus_used', 0),
            'memory_mb_used': host_stats.get('memory_mb_used', 0),
            'local_gb_used': host_stats.get('disk_used', 0),
            'hypervisor_type': host_stats.get('hypervisor_type',
                                              obj_fields.HVType.ZVM),
            'hypervisor_version': host_stats.get('hypervisor_version', 0),
            'hypervisor_hostname': host_stats.get('hypervisor_hostname',
                                                  hypervisor_hostname),
            'cpu_info': jsonutils.dumps(host_stats.get('cpu_info', {})),
            'disk_available_least': host_stats.get('disk_available', 0),
            'supported_instances': [(obj_fields.Architecture.S390X,
                                     obj_fields.HVType.ZVM,
                                     obj_fields.VMMode.HVM)],
            'numa_topology': None,
        }

        LOG.debug("Getting available resource for %(host)s:%(nodename)s",
                  {'host': CONF.host, 'nodename': nodename})

        return res


    def get_available_nodes(self, refresh=False):
        return self._hypervisor.get_available_nodes(refresh=refresh)

    def get_info(self, instance, use_cache=True):
        _guest = guest.Guest(self._hypervisor, instance)
        return _guest.get_info()

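    # Spawn sequence: build a config drive, make sure the image is in the
    # SDK image repository, create and deploy the guest, configure any
    # ephemeral minidisks, plug VIFs, then start the guest.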

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, allocations, network_info=None,
              block_device_info=None, power_on=True, accel_info=None):

        LOG.info("Spawning new instance %s on zVM hypervisor",
                 instance.name, instance=instance)

        if self._hypervisor.guest_exists(instance):
            raise exception.InstanceExists(name=instance.name)

        os_distro = image_meta.properties.get('os_distro')
        if os_distro is None or len(os_distro) == 0:
            reason = _("The `os_distro` image metadata property is required")
            raise exception.InvalidInput(reason=reason)

        try:
            spawn_start = time.time()

            transportfiles = zvmutils.generate_configdrive(context,
                instance, injected_files, network_info,
                admin_password)

            spawn_image_name = self._get_image_info(context, image_meta.id,
                                                    os_distro)
            disk_list, eph_list = self._set_disk_list(instance,
                                                      spawn_image_name,
                                                      block_device_info)

            # Create the guest vm
            self._hypervisor.guest_create(instance.name,
                instance.vcpus, instance.memory_mb,
                disk_list)

            # Deploy image to the guest vm
            self._hypervisor.guest_deploy(instance.name,
                spawn_image_name, transportfiles=transportfiles)

            # Handle ephemeral disks
            if eph_list:
                self._hypervisor.guest_config_minidisks(instance.name,
                                                        eph_list)
            # Setup network for z/VM instance
            self._wait_vif_plug_events(instance.name, os_distro,
                                       network_info, instance)

            self._hypervisor.guest_start(instance.name)
            spawn_time = time.time() - spawn_start
            LOG.info("Instance spawned successfully in %s seconds",
                     spawn_time, instance=instance)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                LOG.error("Deploying instance %(instance)s "
                          "failed with reason: %(err)s",
                          {'instance': instance.name, 'err': err},
                          instance=instance)
                try:
                    self.destroy(context, instance, network_info,
                                 block_device_info)
                except Exception:
                    LOG.exception("Failed to destroy instance",
                                  instance=instance)

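    # Serialized so concurrent spawns of the same image do not race to
    # import it into the SDK image repository.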

    @lockutils.synchronized('IMAGE_INFO_SEMAPHORE')
    def _get_image_info(self, context, image_meta_id, os_distro):
        try:
            res = self._hypervisor.image_query(imagename=image_meta_id)
        except exception.ZVMConnectorError as err:
            with excutils.save_and_reraise_exception() as sare:
                if err.overallRC == 404:
                    sare.reraise = False
                    self._import_spawn_image(context, image_meta_id,
                                             os_distro)

                    res = self._hypervisor.image_query(
                        imagename=image_meta_id)

        return res[0]['imagename']


    def _set_disk_list(self, instance, image_name, block_device_info):
        if instance.root_gb == 0:
            root_disk_size = self._hypervisor.image_get_root_disk_size(
                image_name)
        else:
            root_disk_size = '%ig' % instance.root_gb

        disk_list = []
        root_disk = {'size': root_disk_size,
                     'is_boot_disk': True
                     }
        disk_list.append(root_disk)
        ephemeral_disks_info = driver.block_device_info_get_ephemerals(
            block_device_info)

        eph_list = []
        for eph in ephemeral_disks_info:
            eph_dict = {'size': '%ig' % eph['size'],
                        'format': (CONF.default_ephemeral_format or
                                   DEFAULT_EPH_DISK_FMT)}
            eph_list.append(eph_dict)

        if eph_list:
            disk_list.extend(eph_list)
        return disk_list, eph_list

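    # Example (illustrative): for a flavor with a 10 GB root disk and one
    # 2 GB ephemeral disk, with default_ephemeral_format unset,
    # _set_disk_list returns:
    #   disk_list = [{'size': '10g', 'is_boot_disk': True},
    #                {'size': '2g', 'format': 'ext3'}]
    #   eph_list = [{'size': '2g', 'format': 'ext3'}]
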

    def _setup_network(self, vm_name, os_distro, network_info, instance):
        LOG.debug("Creating NICs for vm %s", vm_name)
        inst_nets = []
        for vif in network_info:
            subnet = vif['network']['subnets'][0]
            _net = {'ip_addr': subnet['ips'][0]['address'],
                    'gateway_addr': subnet['gateway']['address'],
                    'cidr': subnet['cidr'],
                    'mac_addr': vif['address'],
                    'nic_id': vif['id']}
            inst_nets.append(_net)

        if inst_nets:
            self._hypervisor.guest_create_network_interface(vm_name,
                os_distro, inst_nets)


    @staticmethod
    def _get_neutron_event(network_info):
        if CONF.vif_plugging_timeout:
            return [('network-vif-plugged', vif['id'])
                    for vif in network_info if vif.get('active') is False]

        return []


    def _neutron_failed_callback(self, event_name, instance):
        LOG.error("Neutron reported failure on event %s for instance",
                  event_name, instance=instance)
        if CONF.vif_plugging_is_fatal:
            raise exception.VirtualInterfaceCreateException()

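    # Network setup runs inside wait_for_instance_event so that
    # network-vif-plugged notifications arriving while NICs are being
    # created are not missed.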

    def _wait_vif_plug_events(self, vm_name, os_distro, network_info,
                              instance):
        timeout = CONF.vif_plugging_timeout
        try:
            event = self._get_neutron_event(network_info)
            with self.virtapi.wait_for_instance_event(
                    instance, event, deadline=timeout,
                    error_callback=self._neutron_failed_callback):
                self._setup_network(vm_name, os_distro, network_info,
                                    instance)
        except eventlet.timeout.Timeout:
            LOG.warning("Timeout waiting for vif plugging callback.",
                        instance=instance)
            if CONF.vif_plugging_is_fatal:
                raise exception.VirtualInterfaceCreateException()
        except Exception as err:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed for vif plugging: %s", str(err),
                          instance=instance)


    def _import_spawn_image(self, context, image_meta_id, image_os_version):
        LOG.debug("Downloading the image %s from glance to nova compute "
                  "server", image_meta_id)
        image_path = os.path.join(os.path.normpath(CONF.zvm.image_tmp_path),
                                  image_meta_id)
        if not os.path.exists(image_path):
            images.fetch(context, image_meta_id, image_path)
        image_url = "file://" + image_path
        image_meta = {'os_version': image_os_version}
        self._hypervisor.image_import(image_meta_id, image_url, image_meta)


    def destroy(self, context, instance, network_info=None,
                block_device_info=None, destroy_disks=False):
        if self._hypervisor.guest_exists(instance):
            LOG.info("Destroying instance", instance=instance)
            try:
                self._hypervisor.guest_delete(instance.name)
            except exception.ZVMConnectorError as err:
                if err.overallRC == 404:
                    LOG.info("Instance disappeared while it was being "
                             "destroyed", instance=instance)
                else:
                    raise
        else:
            LOG.warning("Instance does not exist", instance=instance)


    def get_host_uptime(self):
        return self._hypervisor.get_host_uptime()

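    # Snapshot flow: capture the guest through the SDK, export the
    # captured image to a temporary file on the compute host, upload it
    # to glance, then clean up both the local file and the SDK copy.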

    def snapshot(self, context, instance, image_id, update_task_state):

        (image_service, image_id) = glance.get_remote_image_service(
            context, image_id)

        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)

        try:
            self._hypervisor.guest_capture(instance.name, image_id)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to capture the instance "
                          "to generate an image with reason: %(err)s",
                          {'err': err}, instance=instance)
                # Clean up the image from glance
                image_service.delete(context, image_id)

        # Export the image to the nova-compute server temporarily
        image_path = os.path.join(os.path.normpath(
            CONF.zvm.image_tmp_path), image_id)
        dest_path = "file://" + image_path
        try:
            resp = self._hypervisor.image_export(image_id, dest_path)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to export image %s from SDK server to "
                          "nova compute server", image_id)
                image_service.delete(context, image_id)
                self._hypervisor.image_delete(image_id)

        # Build the metadata for the new glance image
        new_image_meta = {
            'status': 'active',
            'properties': {
                'image_location': 'snapshot',
                'image_state': 'available',
                'owner_id': instance['project_id'],
                'os_distro': resp['os_version'],
                'architecture': obj_fields.Architecture.S390X,
                'hypervisor_type': obj_fields.HVType.ZVM,
            },
            'disk_format': 'raw',
            'container_format': 'bare',
        }
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)

        # Save the image to glance
        try:
            with open(image_path, 'r') as image_file:
                image_service.update(context,
                                     image_id,
                                     new_image_meta,
                                     image_file,
                                     purge_props=False)
        except Exception:
            with excutils.save_and_reraise_exception():
                image_service.delete(context, image_id)
        finally:
            zvmutils.clean_up_file(image_path)
            self._hypervisor.image_delete(image_id)

        LOG.debug("Snapshot image upload complete", instance=instance)


    def power_off(self, instance, timeout=0, retry_interval=0):
        if timeout >= 0 and retry_interval > 0:
            self._hypervisor.guest_softstop(instance.name, timeout=timeout,
                                            retry_interval=retry_interval)
        else:
            self._hypervisor.guest_softstop(instance.name)


    def power_on(self, context, instance, network_info,
                 block_device_info=None, accel_info=None, share_info=None):
        self._hypervisor.guest_start(instance.name)

    def pause(self, instance):
        self._hypervisor.guest_pause(instance.name)

    def unpause(self, instance):
        self._hypervisor.guest_unpause(instance.name)


    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None,
               accel_info=None, share_info=None):

        if reboot_type == 'SOFT':
            self._hypervisor.guest_reboot(instance.name)
        else:
            self._hypervisor.guest_reset(instance.name)


    def get_console_output(self, context, instance):
        return self._hypervisor.guest_get_console_output(instance.name)

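    # Report VCPU, MEMORY_MB and DISK_GB inventory to placement, applying
    # the configured allocation ratios and host reservations.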

    def update_provider_tree(self, provider_tree, nodename, allocations=None):
        resources = self._hypervisor.get_available_resource()

        inventory = provider_tree.data(nodename).inventory
        allocation_ratios = self._get_allocation_ratios(inventory)

        inventory = {
            orc.VCPU: {
                'total': resources['vcpus'],
                'min_unit': 1,
                'max_unit': resources['vcpus'],
                'step_size': 1,
                'allocation_ratio': allocation_ratios[orc.VCPU],
                'reserved': CONF.reserved_host_cpus,
            },
            orc.MEMORY_MB: {
                'total': resources['memory_mb'],
                'min_unit': 1,
                'max_unit': resources['memory_mb'],
                'step_size': 1,
                'allocation_ratio': allocation_ratios[orc.MEMORY_MB],
                'reserved': CONF.reserved_host_memory_mb,
            },
            orc.DISK_GB: {
                'total': resources['disk_total'],
                'min_unit': 1,
                'max_unit': resources['disk_total'],
                'step_size': 1,
                'allocation_ratio': allocation_ratios[orc.DISK_GB],
                'reserved': self._get_reserved_host_disk_gb_from_config(),
            },
        }

        provider_tree.update_inventory(nodename, inventory)