# needs:fix_opt_description
# needs:check_deprecation_status
# needs:check_opt_group_and_type
# needs:fix_opt_description_indentation
# needs:fix_opt_registration_consistency

# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import itertools

from oslo_config import cfg
from oslo_config import types

from nova.conf import paths


libvirt_group = cfg.OptGroup("libvirt",
                             title="Libvirt Options",
                             help="""
Libvirt options allow cloud administrators to configure the libvirt
hypervisor driver to be used within an OpenStack deployment.

Almost all of the libvirt config options are influenced by the ``virt_type``
option, which describes the virtualization type (or so-called domain type)
libvirt should use for specific features such as live migration and
snapshots.
""")

libvirt_general_opts = [
    cfg.StrOpt('rescue_image_id',
               help="""
The ID of the image to boot from to rescue data from a corrupted instance.

If the rescue REST API operation doesn't provide an ID of an image to
use, the image which is referenced by this ID is used. If this
option is not set, the image from the instance is used.

Possible values:

* An ID of an image or nothing. If it points to an *Amazon Machine
  Image* (AMI), consider setting the config options ``rescue_kernel_id``
  and ``rescue_ramdisk_id`` too. If nothing is set, the image of the instance
  is used.

Related options:

* ``rescue_kernel_id``: If the chosen rescue image allows the separate
  definition of its kernel disk, the value of this option is used,
  if specified. This is the case when *Amazon*'s AMI/AKI/ARI image
  format is used for the rescue image.
* ``rescue_ramdisk_id``: If the chosen rescue image allows the separate
  definition of its RAM disk, the value of this option is used,
  if specified. This is the case when *Amazon*'s AMI/AKI/ARI image
  format is used for the rescue image.
"""),
    cfg.StrOpt('rescue_kernel_id',
               help="""
The ID of the kernel (AKI) image to use with the rescue image.

If the chosen rescue image allows the separate definition of its kernel
disk, the value of this option is used, if specified. This is the case
when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image.

Possible values:

* An ID of a kernel image or nothing. If nothing is specified, the kernel
  disk from the instance is used if it was launched with one.

Related options:

* ``rescue_image_id``: If that option points to an image in *Amazon*'s
  AMI/AKI/ARI image format, it's useful to use ``rescue_kernel_id`` too.
"""),
    cfg.StrOpt('rescue_ramdisk_id',
               help="""
The ID of the RAM disk (ARI) image to use with the rescue image.

If the chosen rescue image allows the separate definition of its RAM
disk, the value of this option is used, if specified. This is the case
when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image.

Possible values:

* An ID of a RAM disk image or nothing. If nothing is specified, the RAM
  disk from the instance is used if it was launched with one.

Related options:

* ``rescue_image_id``: If that option points to an image in *Amazon*'s
  AMI/AKI/ARI image format, it's useful to use ``rescue_ramdisk_id`` too.
"""),
    cfg.StrOpt('virt_type',
               default='kvm',
               choices=('kvm', 'lxc', 'qemu', 'parallels'),
               help="""
Describes the virtualization type (or so-called domain type) libvirt should
use.

The choice of this type must match the underlying virtualization strategy
you have chosen for this host.

Related options:

* ``connection_uri``: depends on this
* ``disk_prefix``: depends on this
* ``cpu_mode``: depends on this
* ``cpu_models``: depends on this
* ``tb_cache_size``: depends on this
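
For example, a typical deployment doing full hardware-accelerated
virtualization would use (this is also the default)::

    [libvirt]
    virt_type = kvm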

122"""), 

123 cfg.StrOpt('connection_uri', 

124 default='', 

125 help=""" 

126Overrides the default libvirt URI of the chosen virtualization type. 

127 

128If set, Nova will use this URI to connect to libvirt. 

129 

130Possible values: 

131 

132* An URI like ``qemu:///system``. 

133 

134 This is only necessary if the URI differs to the commonly known URIs 

135 for the chosen virtualization type. 

136 

137Related options: 

138 

139* ``virt_type``: Influences what is used as default value here. 

140"""), 

141 cfg.BoolOpt('inject_password', 

142 default=False, 

143 help=""" 

144Allow the injection of an admin password for instance only at ``create`` and 

145``rebuild`` process. 

146 

147There is no agent needed within the image to do this. If *libguestfs* is 

148available on the host, it will be used. Otherwise *nbd* is used. The file 

149system of the image will be mounted and the admin password, which is provided 

150in the REST API call will be injected as password for the root user. If no 

151root user is available, the instance won't be launched and an error is thrown. 

152Be aware that the injection is *not* possible when the instance gets launched 

153from a volume. 

154 

155*Linux* distribution guest only. 

156 

157Possible values: 

158 

159* True: Allows the injection. 

160* False: Disallows the injection. Any via the REST API provided admin password 

161 will be silently ignored. 

162 

163Related options: 

164 

165* ``inject_partition``: That option will decide about the discovery and usage 

166 of the file system. It also can disable the injection at all. 

167"""), 

168 cfg.BoolOpt('inject_key', 

169 default=False, 

170 help=""" 

171Allow the injection of an SSH key at boot time. 

172 

173There is no agent needed within the image to do this. If *libguestfs* is 

174available on the host, it will be used. Otherwise *nbd* is used. The file 

175system of the image will be mounted and the SSH key, which is provided 

176in the REST API call will be injected as SSH key for the root user and 

177appended to the ``authorized_keys`` of that user. The SELinux context will 

178be set if necessary. Be aware that the injection is *not* possible when the 

179instance gets launched from a volume. 

180 

181This config option will enable directly modifying the instance disk and does 

182not affect what cloud-init may do using data from config_drive option or the 

183metadata service. 

184 

185*Linux* distribution guest only. 

186 

187Related options: 

188 

189* ``inject_partition``: That option will decide about the discovery and usage 

190 of the file system. It also can disable the injection at all. 

191"""), 

192 cfg.IntOpt('inject_partition', 

193 default=-2, 

194 min=-2, 

195 help=""" 

196Determines how the file system is chosen to inject data into it. 

197 

198*libguestfs* is used to inject data. If libguestfs is not able to determine 

199the root partition (because there are more or less than one root partition) or 

200cannot mount the file system it will result in an error and the instance won't 

201boot. 

202 

203Possible values: 

204 

205* -2 => disable the injection of data. 

206* -1 => find the root partition with the file system to mount with libguestfs 

207* 0 => The image is not partitioned 

208* >0 => The number of the partition to use for the injection 

209 

210*Linux* distribution guest only. 

211 

212Related options: 

213 

214* ``inject_key``: If this option allows the injection of a SSH key it depends 

215 on value greater or equal to -1 for ``inject_partition``. 

216* ``inject_password``: If this option allows the injection of an admin password 

217 it depends on value greater or equal to -1 for ``inject_partition``. 

218* ``[guestfs]/debug`` You can enable the debug log level of libguestfs with 

219 this config option. A more verbose output will help in debugging issues. 

220* ``virt_type``: If you use ``lxc`` as virt_type it will be treated as a 

221 single partition image 

222"""), 

223 cfg.StrOpt('live_migration_scheme', 

224 help=""" 

225URI scheme for live migration used by the source of live migration traffic. 

226 

227Override the default libvirt live migration scheme (which is dependent on 

228virt_type). If this option is set to None, nova will automatically choose a 

229sensible default based on the hypervisor. It is not recommended that you change 

230this unless you are very sure that hypervisor supports a particular scheme. 

231 

232Related options: 

233 

234* ``virt_type``: This option is meaningful only when ``virt_type`` is set to 

235 `kvm` or `qemu`. 

236* ``live_migration_uri``: If ``live_migration_uri`` value is not None, the 

237 scheme used for live migration is taken from ``live_migration_uri`` instead. 

238"""), 

239 cfg.HostDomainOpt('live_migration_inbound_addr', 

240 help=""" 

241IP address used as the live migration address for this host. 

242 

243This option indicates the IP address which should be used as the target for 

244live migration traffic when migrating to this hypervisor. This metadata is then 

245used by the source of the live migration traffic to construct a migration URI. 

246 

247If this option is set to None, the hostname of the migration target compute 

248node will be used. 

249 

250This option is useful in environments where the live-migration traffic can 

251impact the network plane significantly. A separate network for live-migration 

252traffic can then use this config option and avoids the impact on the 

253management network. 
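
For example, to direct live-migration traffic at a dedicated network on this
host (the address below is an illustrative placeholder)::

    [libvirt]
    live_migration_inbound_addr = 192.0.2.10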

254"""), 

255 cfg.StrOpt('live_migration_uri', 

256 deprecated_for_removal=True, 

257 deprecated_since="15.0.0", 

258 deprecated_reason=""" 

259live_migration_uri is deprecated for removal in favor of two other options that 

260allow to change live migration scheme and target URI: ``live_migration_scheme`` 

261and ``live_migration_inbound_addr`` respectively. 

262""", 

263 help=""" 

264Live migration target URI used by the source of live migration traffic. 

265 

266Override the default libvirt live migration target URI (which is dependent 

267on virt_type). Any included "%s" is replaced with the migration target 

268hostname, or `live_migration_inbound_addr` if set. 

269 

270If this option is set to None (which is the default), Nova will automatically 

271generate the `live_migration_uri` value based on only 4 supported `virt_type` 

272in following list: 

273 

274* 'kvm': 'qemu+tcp://%s/system' 

275* 'qemu': 'qemu+tcp://%s/system' 

276* 'parallels': 'parallels+tcp://%s/system' 

277 

278Related options: 

279 

280* ``live_migration_inbound_addr``: If ``live_migration_inbound_addr`` value 

281 is not None and ``live_migration_tunnelled`` is False, the ip/hostname 

282 address of target compute node is used instead of ``live_migration_uri`` as 

283 the uri for live migration. 

284* ``live_migration_scheme``: If ``live_migration_uri`` is not set, the scheme 

285 used for live migration is taken from ``live_migration_scheme`` instead. 

286"""), 

287 cfg.BoolOpt('live_migration_tunnelled', 

288 default=False, 

289 deprecated_for_removal=True, 

290 deprecated_since='23.0.0', 

291 deprecated_reason=""" 

292The "tunnelled live migration" has two inherent limitations: it cannot 

293handle live migration of disks in a non-shared storage setup; and it has 

294a huge performance cost. Both these problems are solved by 

295``live_migration_with_native_tls`` (requires a pre-configured TLS 

296environment), which is the recommended approach for securing all live 

297migration streams.""", 

298 help=""" 

299Enable tunnelled migration. 

300 

301This option enables the tunnelled migration feature, where migration data is 

302transported over the libvirtd connection. If enabled, we use the 

303VIR_MIGRATE_TUNNELLED migration flag, avoiding the need to configure 

304the network to allow direct hypervisor to hypervisor communication. 

305If False, use the native transport. If not set, Nova will choose a 

306sensible default based on, for example the availability of native 

307encryption support in the hypervisor. Enabling this option will definitely 

308impact performance massively. 

309 

310Note that this option is NOT compatible with use of block migration. 

311"""), 

312 cfg.IntOpt('live_migration_bandwidth', 

313 default=0, 

314 help=""" 

315Maximum bandwidth(in MiB/s) to be used during migration. 

316 

317If set to 0, the hypervisor will choose a suitable default. Some hypervisors 

318do not support this feature and will return an error if bandwidth is not 0. 

319Please refer to the libvirt documentation for further details. 

320"""), 

321 cfg.IntOpt('live_migration_downtime', 

322 default=500, 

323 min=100, 

324 help=""" 

325Target maximum period of time Nova will try to keep the instance paused 

326during the last part of the memory copy, in *milliseconds*. 

327 

328Minimum downtime is 100ms. You can increase this value if you want to allow 

329live-migrations to complete faster, or avoid live-migration timeout errors 

330by allowing the guest to be paused for longer during the live-migration switch 

331over. This value may be exceeded if there is any reduction on the transfer rate 

332after the VM is paused. 

333 

334Related options: 

335 

336* live_migration_completion_timeout 

337"""), 

338 cfg.IntOpt('live_migration_downtime_steps', 

339 default=10, 

340 min=3, 

341 help=""" 

342Number of incremental steps to reach max downtime value. 

343 

344Minimum number of steps is 3. 

345"""), 

346 cfg.IntOpt('live_migration_downtime_delay', 

347 default=75, 

348 min=3, 

349 help=""" 

350Time to wait, in seconds, between each step increase of the migration 

351downtime. 

352 

353Minimum delay is 3 seconds. Value is per GiB of guest RAM + disk to be 

354transferred, with lower bound of a minimum of 2 GiB per device. 
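
For example, with the default of 75 seconds, a guest with 4 GiB of RAM and
disk to transfer would wait roughly 75 * 4 = 300 seconds between steps.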

355"""), 

356 cfg.IntOpt('live_migration_completion_timeout', 

357 default=800, 

358 min=0, 

359 mutable=True, 

360 help=""" 

361Time to wait, in seconds, for migration to successfully complete transferring 

362data before aborting the operation. 

363 

364Value is per GiB of guest RAM + disk to be transferred, with lower bound of 

365a minimum of 2 GiB. Should usually be larger than downtime delay * downtime 

366steps. Set to 0 to disable timeouts. 
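
With the default values, this recommendation holds:
``live_migration_downtime_delay`` (75) * ``live_migration_downtime_steps``
(10) = 750, which is below the default timeout of 800.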

Related options:

* live_migration_downtime
* live_migration_downtime_steps
* live_migration_downtime_delay
"""),
    cfg.StrOpt('live_migration_timeout_action',
               default='abort',
               choices=('abort', 'force_complete'),
               mutable=True,
               help="""
This option will be used to determine what action will be taken against a
VM after ``live_migration_completion_timeout`` expires. By default, the live
migrate operation will be aborted after completion timeout. If it is set to
``force_complete``, the compute service will either pause the VM or trigger
post-copy depending on whether post-copy is enabled and available
(``live_migration_permit_post_copy`` is set to True).

Related options:

* live_migration_completion_timeout
* live_migration_permit_post_copy
"""),
    cfg.BoolOpt('live_migration_permit_post_copy',
                default=False,
                help="""
This option allows nova to switch an on-going live migration to post-copy
mode, i.e., switch the active VM to the one on the destination node before
the migration is complete, therefore ensuring an upper bound on the memory
that needs to be transferred. Post-copy requires libvirt>=1.3.3 and
QEMU>=2.5.0.

When permitted, post-copy mode will be automatically activated if
we reach the timeout defined by ``live_migration_completion_timeout`` and
``live_migration_timeout_action`` is set to 'force_complete'. Note if you
change to no timeout or choose to use 'abort',
i.e. ``live_migration_completion_timeout = 0``, then there will be no
automatic switch to post-copy.

The live-migration force complete API also uses post-copy when permitted. If
post-copy mode is not available, force complete falls back to pausing the VM
to ensure the live-migration operation will complete.

When using post-copy mode, if the source and destination hosts lose network
connectivity, the VM being live-migrated will need to be rebooted. For more
details, please see the Administration guide.

Related options:

* live_migration_permit_auto_converge
* live_migration_timeout_action
"""),
    cfg.BoolOpt('live_migration_permit_auto_converge',
                default=False,
                help="""
This option allows nova to start live migration with auto converge on.

Auto converge throttles down CPU if the progress of the on-going live
migration is slow. Auto converge will only be used if this flag is set to
True and post-copy is not permitted or post-copy is unavailable due to the
version of libvirt and QEMU in use.

Related options:

* live_migration_permit_post_copy
"""),
    cfg.StrOpt('snapshot_image_format',
               choices=[
                   ('raw', 'RAW disk format'),
                   ('qcow2', 'KVM default disk format'),
                   ('vmdk', 'VMWare default disk format'),
                   ('vdi', 'VirtualBox default disk format'),
               ],
               help="""
Determine the snapshot image format when sending to the image service.

If set, this decides what format is used when sending the snapshot to the
image service. If not set, defaults to the same type as the source image.
"""),
    cfg.BoolOpt('live_migration_with_native_tls',
                default=False,
                help="""
Use QEMU-native TLS encryption when live migrating.

This option will allow both migration stream (guest RAM plus device
state) *and* disk stream to be transported over native TLS, i.e. TLS
support built into QEMU.

Prerequisite: TLS environment is configured correctly on all relevant
Compute nodes. This means, Certificate Authority (CA), server, client
certificates, their corresponding keys, and their file permissions are
in place, and are validated.

Notes:

* To have encryption for migration stream and disk stream (also called:
  "block migration"), ``live_migration_with_native_tls`` is the
  preferred config attribute instead of ``live_migration_tunnelled``.

* The ``live_migration_tunnelled`` will be deprecated in the long-term
  for two main reasons: (a) it incurs a huge performance penalty; and
  (b) it is not compatible with block migration. Therefore, if your
  compute nodes have at least libvirt 4.4.0 and QEMU 2.11.0, it is
  strongly recommended to use ``live_migration_with_native_tls``.

* The ``live_migration_tunnelled`` and
  ``live_migration_with_native_tls`` should not be used at the same
  time.

* Unlike ``live_migration_tunnelled``, the
  ``live_migration_with_native_tls`` *is* compatible with block
  migration. That is, with this option, the NBD stream, over which disks
  are migrated to a target host, will be encrypted.

Related options:

* ``live_migration_tunnelled``: This transports the migration stream (but
  not the disk stream) over libvirtd.
"""),
    cfg.StrOpt('disk_prefix',
               help="""
Override the default disk prefix for the devices attached to an instance.

If set, this is used to identify a free disk device name for a bus.

Possible values:

* Any prefix which will result in a valid disk device name like 'sda' or
  'hda' for example. This is only necessary if the device names differ from
  the commonly known device name prefixes for a virtualization type such
  as: sd, xvd, uvd, vd.

Related options:

* ``virt_type``: Influences which device type is used, which determines
  the default disk prefix.
"""),
    cfg.IntOpt('wait_soft_reboot_seconds',
               default=120,
               help='Number of seconds to wait for instance to shut down '
                    'after soft reboot request is made. We fall back to '
                    'hard reboot if instance does not shutdown within this '
                    'window.'),
    cfg.StrOpt('cpu_mode',
               choices=[
                   ('host-model', 'Clone the host CPU feature flags'),
                   ('host-passthrough', 'Use the host CPU model exactly'),
                   ('custom', 'Use the CPU model in ``[libvirt]cpu_models``'),
                   ('none', "Don't set a specific CPU model. For instances "
                    "with ``[libvirt] virt_type`` as KVM/QEMU, the default "
                    "CPU model from QEMU will be used, which provides a "
                    "basic set of CPU features that are compatible with "
                    "most hosts"),
               ],
               help="""
Used to set the CPU mode an instance should have.

If ``virt_type="kvm|qemu"``, it will default to ``host-model``, otherwise it
will default to ``none``.

Related options:

* ``cpu_models``: This should be set ONLY when ``cpu_mode`` is set to
  ``custom``. Otherwise, it would result in an error and the instance launch
  will fail.
"""),
    cfg.ListOpt('cpu_models',
                deprecated_name='cpu_model',
                default=[],
                help="""
An ordered list of CPU models the host supports.

It is expected that the list is ordered so that the more common and less
advanced CPU models are listed earlier. Here is an example:
``SandyBridge,IvyBridge,Haswell,Broadwell``, where each later CPU model's
feature set is richer than that of the previous one.

Possible values:

* The named CPU models can be found via ``virsh cpu-models ARCH``, where
  ARCH is your host architecture.

Related options:

* ``cpu_mode``: This should be set to ``custom`` ONLY when you want to
  configure (via ``cpu_models``) a specific named CPU model. Otherwise, it
  would result in an error and the instance launch will fail.
* ``virt_type``: Only the virtualization types ``kvm`` and ``qemu`` use this.

.. note::
    Be careful to only specify models which can be fully supported in
    hardware.
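
Subject to that hardware caveat, a deployment whose hosts all support the
models above might set, for example::

    [libvirt]
    cpu_mode = custom
    cpu_models = SandyBridge,IvyBridge,Haswell,Broadwell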

558"""), 

559 cfg.ListOpt( 

560 'cpu_model_extra_flags', 

561 item_type=types.String( 

562 ignore_case=True, 

563 ), 

564 default=[], 

565 help=""" 

566Enable or disable guest CPU flags. 

567 

568To explicitly enable or disable CPU flags, use the ``+flag`` or 

569``-flag`` notation -- the ``+`` sign will enable the CPU flag for the 

570guest, while a ``-`` sign will disable it. If neither ``+`` nor ``-`` 

571is specified, the flag will be enabled, which is the default behaviour. 

572For example, if you specify the following (assuming the said CPU model 

573and features are supported by the host hardware and software):: 

574 

575 [libvirt] 

576 cpu_mode = custom 

577 cpu_models = Cascadelake-Server 

578 cpu_model_extra_flags = -hle, -rtm, +ssbd, mtrr 

579 

580Nova will disable the ``hle`` and ``rtm`` flags for the guest; and it 

581will enable ``ssbd`` and ``mttr`` (because it was specified with neither 

582``+`` nor ``-`` prefix). 

583 

584The CPU flags are case-insensitive. In the following example, the 

585``pdpe1gb`` flag will be disabled for the guest; ``vmx`` and ``pcid`` 

586flags will be enabled:: 

587 

588 [libvirt] 

589 cpu_mode = custom 

590 cpu_models = Haswell-noTSX-IBRS 

591 cpu_model_extra_flags = -PDPE1GB, +VMX, pcid 

592 

593Specifying extra CPU flags is valid in combination with all the three 

594possible values of ``cpu_mode`` config attribute: ``custom`` (this also 

595requires an explicit CPU model to be specified via the ``cpu_models`` 

596config attribute), ``host-model``, or ``host-passthrough``. 

597 

598There can be scenarios where you may need to configure extra CPU flags 

599even for ``host-passthrough`` CPU mode, because sometimes QEMU may 

600disable certain CPU features. An example of this is Intel's "invtsc" 

601(Invariable Time Stamp Counter) CPU flag -- if you need to expose this 

602flag to a Nova instance, you need to explicitly enable it. 

603 

604The possible values for ``cpu_model_extra_flags`` depends on the CPU 

605model in use. Refer to ``/usr/share/libvirt/cpu_map/*.xml`` for possible 

606CPU feature flags for a given CPU model. 

607 

608A special note on a particular CPU flag: ``pcid`` (an Intel processor 

609feature that alleviates guest performance degradation as a result of 

610applying the 'Meltdown' CVE fixes). When configuring this flag with the 

611``custom`` CPU mode, not all CPU models (as defined by QEMU and libvirt) 

612need it: 

613 

614* The only virtual CPU models that include the ``pcid`` capability are 

615 Intel "Haswell", "Broadwell", and "Skylake" variants. 

616 

617* The libvirt / QEMU CPU models "Nehalem", "Westmere", "SandyBridge", 

618 and "IvyBridge" will _not_ expose the ``pcid`` capability by default, 

619 even if the host CPUs by the same name include it. I.e. 'PCID' needs 

620 to be explicitly specified when using the said virtual CPU models. 

621 

622The libvirt driver's default CPU mode, ``host-model``, will do the right 

623thing with respect to handling 'PCID' CPU flag for the guest -- 

624*assuming* you are running updated processor microcode, host and guest 

625kernel, libvirt, and QEMU. The other mode, ``host-passthrough``, checks 

626if 'PCID' is available in the hardware, and if so directly passes it 

627through to the Nova guests. Thus, in context of 'PCID', with either of 

628these CPU modes (``host-model`` or ``host-passthrough``), there is no 

629need to use the ``cpu_model_extra_flags``. 

630 

631Related options: 

632 

633* cpu_mode 

634* cpu_models 

635"""), 

636 cfg.StrOpt('snapshots_directory', 

637 default='$instances_path/snapshots', 

638 help='Location where libvirt driver will store snapshots ' 

639 'before uploading them to image service'), 

640 cfg.ListOpt('disk_cachemodes', 

641 default=[], 

642 help=""" 

643Specific cache modes to use for different disk types. 

644 

645For example: file=directsync,block=none,network=writeback 

646 

647For local or direct-attached storage, it is recommended that you use 

648writethrough (default) mode, as it ensures data integrity and has acceptable 

649I/O performance for applications running in the guest, especially for read 

650operations. However, caching mode none is recommended for remote NFS storage, 

651because direct I/O operations (O_DIRECT) perform better than synchronous I/O 

652operations (with O_SYNC). Caching mode none effectively turns all guest I/O 

653operations into direct I/O operations on the host, which is the NFS client in 

654this environment. 

655 

656Possible cache modes: 

657 

658* default: "It Depends" -- For Nova-managed disks, ``none``, if the host 

659 file system is capable of Linux's 'O_DIRECT' semantics; otherwise 

660 ``writeback``. For volume drivers, the default is driver-dependent: 

661 ``none`` for everything except for SMBFS and Virtuzzo (which use 

662 ``writeback``). 

663* none: With caching mode set to none, the host page cache is disabled, but 

664 the disk write cache is enabled for the guest. In this mode, the write 

665 performance in the guest is optimal because write operations bypass the host 

666 page cache and go directly to the disk write cache. If the disk write cache 

667 is battery-backed, or if the applications or storage stack in the guest 

668 transfer data properly (either through fsync operations or file system 

669 barriers), then data integrity can be ensured. However, because the host 

670 page cache is disabled, the read performance in the guest would not be as 

671 good as in the modes where the host page cache is enabled, such as 

672 writethrough mode. Shareable disk devices, like for a multi-attachable block 

673 storage volume, will have their cache mode set to 'none' regardless of 

674 configuration. 

675* writethrough: With caching set to writethrough mode, the host page cache is 

676 enabled, but the disk write cache is disabled for the guest. Consequently, 

677 this caching mode ensures data integrity even if the applications and storage 

678 stack in the guest do not transfer data to permanent storage properly (either 

679 through fsync operations or file system barriers). Because the host page 

680 cache is enabled in this mode, the read performance for applications running 

681 in the guest is generally better. However, the write performance might be 

682 reduced because the disk write cache is disabled. 

683* writeback: With caching set to writeback mode, both the host page 

684 cache and the disk write cache are enabled for the guest. Because of 

685 this, the I/O performance for applications running in the guest is 

686 good, but the data is not protected in a power failure. As a result, 

687 this caching mode is recommended only for temporary data where 

688 potential data loss is not a concern. 

689 NOTE: Certain backend disk mechanisms may provide safe 

690 writeback cache semantics. Specifically those that bypass the host 

691 page cache, such as QEMU's integrated RBD driver. Ceph documentation 

692 recommends setting this to writeback for maximum performance while 

693 maintaining data safety. 

694* directsync: Like "writethrough", but it bypasses the host page cache. 

695* unsafe: Caching mode of unsafe ignores cache transfer operations 

696 completely. As its name implies, this caching mode should be used only for 

697 temporary data where data loss is not a concern. This mode can be useful for 

698 speeding up guest installations, but you should switch to another caching 

699 mode in production environments. 
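
In ``nova.conf`` form, the inline example above reads::

    [libvirt]
    disk_cachemodes = file=directsync,block=none,network=writeback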

700"""), 

701 cfg.StrOpt('rng_dev_path', 

702 default='/dev/urandom', 

703 help=""" 

704The path to an RNG (Random Number Generator) device that will be used as 

705the source of entropy on the host. Since libvirt 1.3.4, any path (that 

706returns random numbers when read) is accepted. The recommended source 

707of entropy is ``/dev/urandom`` -- it is non-blocking, therefore 

708relatively fast; and avoids the limitations of ``/dev/random``, which is 

709a legacy interface. For more details (and comparison between different 

710RNG sources), refer to the "Usage" section in the Linux kernel API 

711documentation for ``[u]random``: 

712http://man7.org/linux/man-pages/man4/urandom.4.html and 

713http://man7.org/linux/man-pages/man7/random.7.html. 

714"""), 

715 cfg.ListOpt('hw_machine_type', 

716 help='For qemu or KVM guests, set this option to specify ' 

717 'a default machine type per host architecture. ' 

718 'You can find a list of supported machine types ' 

719 'in your environment by checking the output of the ' 

720 ':command:`virsh capabilities` command. The format of ' 

721 'the value for this config option is ' 

722 '``host-arch=machine-type``. For example: ' 

723 '``x86_64=machinetype1,armv7l=machinetype2``.'), 

724 cfg.StrOpt('sysinfo_serial', 

725 default='unique', 

726 choices=( 

727 ('none', 'A serial number entry is not added to the guest ' 

728 'domain xml.'), 

729 ('os', 'A UUID serial number is generated from the host ' 

730 '``/etc/machine-id`` file.'), 

731 ('hardware', 'A UUID for the host hardware as reported by ' 

732 'libvirt. This is typically from the host ' 

733 'SMBIOS data, unless it has been overridden ' 

734 'in ``libvirtd.conf``.'), 

735 ('auto', 'Uses the "os" source if possible, else ' 

736 '"hardware".'), 

737 ('unique', 'Uses instance UUID as the serial number.'), 

738 ), 

739 help=""" 

740The data source used to the populate the host "serial" UUID exposed to guest 

741in the virtual BIOS. All choices except ``unique`` will change the serial when 

742migrating the instance to another host. Changing the choice of this option will 

743also affect existing instances on this host once they are stopped and started 

744again. It is recommended to use the default choice (``unique``) since that will 

745not change when an instance is migrated. However, if you have a need for 

746per-host serials in addition to per-instance serial numbers, then consider 

747restricting flavors via host aggregates. 

748""" 

749 ), 

750 cfg.IntOpt('mem_stats_period_seconds', 

751 default=10, 

752 help='A number of seconds to memory usage statistics period. ' 

753 'Zero or negative value mean to disable memory usage ' 

754 'statistics.'), 

755 cfg.ListOpt('uid_maps', 

756 default=[], 

757 help='List of uid targets and ranges.' 

758 'Syntax is guest-uid:host-uid:count. ' 

759 'Maximum of 5 allowed.'), 

760 cfg.ListOpt('gid_maps', 

761 default=[], 

762 help='List of guid targets and ranges.' 

763 'Syntax is guest-gid:host-gid:count. ' 

764 'Maximum of 5 allowed.'), 

765 cfg.IntOpt('realtime_scheduler_priority', 

766 default=1, 

767 help='In a realtime host context vCPUs for guest will run in ' 

768 'that scheduling priority. Priority depends on the host ' 

769 'kernel (usually 1-99)'), 

770 cfg.ListOpt('enabled_perf_events', 

771 default=[], 

772 help= """ 

773Performance events to monitor and collect statistics for. 

774 

775This will allow you to specify a list of events to monitor low-level 

776performance of guests, and collect related statistics via the libvirt 

777driver, which in turn uses the Linux kernel's ``perf`` infrastructure. 

778With this config attribute set, Nova will generate libvirt guest XML to 

779monitor the specified events. 

780 

781For example, to monitor the count of CPU cycles (total/elapsed) and the 

782count of cache misses, enable them as follows:: 

783 

784 [libvirt] 

785 enabled_perf_events = cpu_clock, cache_misses 

786 

787Possible values: A string list. The list of supported events can be 

788found `here`__. Note that Intel CMT events - ``cmt``, ``mbmbt`` and 

789``mbml`` - are unsupported by recent Linux kernel versions (4.14+) and will be 

790ignored by nova. 

791 

792__ https://libvirt.org/formatdomain.html#elementsPerf. 

793"""), 

794 cfg.IntOpt('num_pcie_ports', 

795 default=0, 

796 min=0, 

797 max=28, 

798 help= """ 

799The number of PCIe ports an instance will get. 

800 

801Libvirt allows a custom number of PCIe ports (pcie-root-port controllers) a 

802target instance will get. Some will be used by default, rest will be available 

803for hotplug use. 

804 

805By default we have just 1-2 free ports which limits hotplug. 

806 

807More info: https://github.com/qemu/qemu/blob/master/docs/pcie.txt 

808 

809Due to QEMU limitations for aarch64/virt maximum value is set to '28'. 

810 

811Default value '0' moves calculating amount of ports to libvirt. 
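
For example, to leave more free ports available for hotplug (an illustrative
value)::

    [libvirt]
    num_pcie_ports = 16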

812"""), 

813 cfg.IntOpt('file_backed_memory', 

814 default=0, 

815 min=0, 

816 help=""" 

817Available capacity in MiB for file-backed memory. 

818 

819Set to 0 to disable file-backed memory. 

820 

821When enabled, instances will create memory files in the directory specified 

822in ``/etc/libvirt/qemu.conf``'s ``memory_backing_dir`` option. The default 

823location is ``/var/lib/libvirt/qemu/ram``. 

824 

825When enabled, the value defined for this option is reported as the node memory 

826capacity. Compute node system memory will be used as a cache for file-backed 

827memory, via the kernel's pagecache mechanism. 

828 

829.. note:: 

830 This feature is not compatible with hugepages. 

831 

832.. note:: 

833 This feature is not compatible with memory overcommit. 

834 

835Related options: 

836 

837* ``virt_type`` must be set to ``kvm`` or ``qemu``. 

838* ``ram_allocation_ratio`` must be set to 1.0. 
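
For example, to expose 1 TiB (1048576 MiB) of file-backed memory on a KVM
host (an illustrative size)::

    [DEFAULT]
    ram_allocation_ratio = 1.0

    [libvirt]
    virt_type = kvm
    file_backed_memory = 1048576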

839"""), 

840 cfg.IntOpt('num_memory_encrypted_guests', 

841 default=None, 

842 min=0, 

843 help=""" 

844Maximum number of guests with encrypted memory which can run 

845concurrently on this compute host. 

846 

847For now this is only relevant for AMD machines which support SEV 

848(Secure Encrypted Virtualization). Such machines have a limited 

849number of slots in their memory controller for storing encryption 

850keys. Each running guest with encrypted memory will consume one of 

851these slots. 

852 

853The option may be reused for other equivalent technologies in the 

854future. If the machine does not support memory encryption, the option 

855will be ignored and inventory will be set to 0. 

856 

857If the machine does support memory encryption and this option is not set, 

858the driver detects maximum number of SEV guests from the libvirt API which 

859is available since v8.0.0. Setting this option overrides the detected limit, 

860unless the given value is not larger than the detected limit. 

861 

862On the other hand, if an older version of libvirt is used, ``None`` means 

863an effectively unlimited inventory, i.e. no limit will be imposed by Nova 

864on the number of SEV guests which can be launched, even though the underlying 

865hardware will enforce its own limit. 

866 

867.. note:: 

868 

869 It is recommended to read :ref:`the deployment documentation's 

870 section on this option <num_memory_encrypted_guests>` before 

871 deciding whether to configure this setting or leave it at the 

872 default. 

873 

874Related options: 

875 

876* :oslo.config:option:`libvirt.virt_type` must be set to ``kvm``. 

877 

878* It's recommended to consider including ``x86_64=q35`` in 

879 :oslo.config:option:`libvirt.hw_machine_type`; see 

880 :ref:`deploying-sev-capable-infrastructure` for more on this. 

881"""), 

882 cfg.IntOpt('device_detach_attempts', 

883 default=8, 

884 min=1, 

885 help=""" 

886Maximum number of attempts the driver tries to detach a device in libvirt. 

887 

888Related options: 

889 

890* :oslo.config:option:`libvirt.device_detach_timeout` 

891 

892"""), 

893 cfg.IntOpt('device_detach_timeout', 

894 default=20, 

895 min=1, 

896 help=""" 

897Maximum number of seconds the driver waits for the success or the failure 

898event from libvirt for a given device detach attempt before it re-trigger the 

899detach. 

900 

901Related options: 

902 

903* :oslo.config:option:`libvirt.device_detach_attempts` 

904 

905"""), 

906 cfg.IntOpt('tb_cache_size', 

907 min=0, 

908 help=""" 

909Qemu>=5.0.0 bumped the default tb-cache size to 1GiB(from 32MiB) and this 

910made it difficult to run multiple guest VMs on systems running with lower 

911memory. With Libvirt>=8.0.0 this config option can be used to configure 

912lower tb-cache size. 

913 

914Set it to > 0 to configure tb-cache for guest VMs. 

915 

916Related options: 

917 

918* ``compute_driver`` (libvirt) 

919* ``virt_type`` (qemu) 

920"""), 

921 

922 cfg.StrOpt('migration_inbound_addr', 

923 default='$my_ip', 

924 help=""" 

925The address used as the migration address for this host. 

926 

927This option indicates the IP address, hostname, or FQDN which should be used as 

928the target for cold migration, resize, and evacuate traffic when moving to this 

929hypervisor. This metadata is then used by the source of the migration traffic 

930to construct the commands used to copy data (e.g. disk image) to the 

931destination. 

932 

933An included "%s" is replaced with the hostname of the migration target 

934hypervisor. 

935 

936 

937Related options: 

938 

939* ``my_ip`` 

940* ``live_migration_inbound_addr`` 

941 

942"""), 

943] 

944 

945libvirt_imagebackend_opts = [ 

946 cfg.StrOpt('images_type', 

947 default='default', 

948 choices=('raw', 'flat', 'qcow2', 'lvm', 'rbd', 'ploop', 

949 'default'), 

950 help=""" 

951VM Images format. 

952 

953If default is specified, then use_cow_images flag is used instead of this 

954one. 

955 

956Related options: 

957 

958* compute.use_cow_images 

959* images_volume_group 

960* [workarounds]/ensure_libvirt_rbd_instance_dir_cleanup 

961* compute.force_raw_images 

962"""), 

963 cfg.StrOpt('images_volume_group', 

964 help=""" 

965LVM Volume Group that is used for VM images, when you specify images_type=lvm 

966 

967Related options: 

968 

969* images_type 

970"""), 

971 cfg.BoolOpt('sparse_logical_volumes', 

972 default=False, 

973 deprecated_for_removal=True, 

974 deprecated_since='18.0.0', 

975 deprecated_reason=""" 

976Sparse logical volumes is a feature that is not tested hence not supported. 

977LVM logical volumes are preallocated by default. If you want thin provisioning, 

978use Cinder thin-provisioned volumes. 

979""", 

980 help=""" 

981Create sparse logical volumes (with virtualsize) if this flag is set to True. 

982"""), 

983 cfg.StrOpt('images_rbd_pool', 

984 default='rbd', 

985 help='The RADOS pool in which rbd volumes are stored'), 

986 cfg.StrOpt('images_rbd_ceph_conf', 

987 default='', # default determined by librados 

988 help='Path to the ceph configuration file to use'), 

989 cfg.StrOpt('images_rbd_glance_store_name', 

990 default='', 

991 help=""" 

992The name of the Glance store that represents the rbd cluster in use by 

993this node. If set, this will allow Nova to request that Glance copy an 

994image from an existing non-local store into the one named by this option 

995before booting so that proper Copy-on-Write behavior is maintained. 

996 

997Related options: 

998 

999* images_type - must be set to ``rbd`` 

1000* images_rbd_glance_copy_poll_interval - controls the status poll frequency 

1001* images_rbd_glance_copy_timeout - controls the overall copy timeout 

1002"""), 

1003 cfg.IntOpt('images_rbd_glance_copy_poll_interval', 

1004 default=15, 

1005 help=""" 

1006The interval in seconds with which to poll Glance after asking for it 

1007to copy an image to the local rbd store. This affects how often we ask 

1008Glance to report on copy completion, and thus should be short enough that 

1009we notice quickly, but not too aggressive that we generate undue load on 

1010the Glance server. 

1011 

1012Related options: 

1013 

1014* images_type - must be set to ``rbd`` 

1015* images_rbd_glance_store_name - must be set to a store name 

1016"""), 

1017 cfg.IntOpt('images_rbd_glance_copy_timeout', 

1018 default=600, 

1019 help=""" 

1020The overall maximum time we will wait for Glance to complete an image 

1021copy to our local rbd store. This should be long enough to allow large 

1022images to be copied over the network link between our local store and the 

1023one where images typically reside. The downside of setting this too long 

1024is just to catch the case where the image copy is stalled or proceeding too 

1025slowly to be useful. Actual errors will be reported by Glance and noticed 

1026according to the poll interval. 

1027 

1028Related options: 

1029 

1030* images_type - must be set to ``rbd`` 

1031* images_rbd_glance_store_name - must be set to a store name 

1032* images_rbd_glance_copy_poll_interval - controls the failure time-to-notice 
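
For example, a Ceph-backed compute node might combine these options as
follows (the store and pool names are illustrative)::

    [libvirt]
    images_type = rbd
    images_rbd_pool = vms
    images_rbd_ceph_conf = /etc/ceph/ceph.conf
    images_rbd_glance_store_name = ceph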

1033"""), 

1034 cfg.StrOpt('hw_disk_discard', 

1035 choices=('ignore', 'unmap'), 

1036 help=""" 

1037Discard option for nova managed disks. 

1038 

1039Requires: 

1040 

1041* Libvirt >= 1.0.6 

1042* Qemu >= 1.5 (raw format) 

1043* Qemu >= 1.6 (qcow2 format) 
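
For example, to pass guest discard/TRIM requests through to the backing
storage::

    [libvirt]
    hw_disk_discard = unmap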

1044"""), 

1045] 

1046 

1047libvirt_lvm_opts = [ 

1048 cfg.StrOpt('volume_clear', 

1049 default='zero', 

1050 choices=[ 

1051 ('zero', 'Overwrite volumes with zeroes'), 

1052 ('shred', 'Overwrite volumes repeatedly'), 

1053 ('none', 'Do not wipe deleted volumes'), 

1054 ], 

1055 help=""" 

1056Method used to wipe ephemeral disks when they are deleted. Only takes effect 

1057if LVM is set as backing storage. 

1058 

1059Related options: 

1060 

1061* images_type - must be set to ``lvm`` 

1062* volume_clear_size 

1063"""), 

1064 cfg.IntOpt('volume_clear_size', 

1065 default=0, 

1066 min=0, 

1067 help=""" 

1068Size of area in MiB, counting from the beginning of the allocated volume, 

1069that will be cleared using method set in ``volume_clear`` option. 

1070 

1071Possible values: 

1072 

1073* 0 - clear whole volume 

1074* >0 - clear specified amount of MiB 

1075 

1076Related options: 

1077 

1078* images_type - must be set to ``lvm`` 

1079* volume_clear - must be set and the value must be different than ``none`` 

1080 for this option to have any impact 
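
For example, to zero only the first GiB of each deleted LVM-backed disk
(an illustrative size)::

    [libvirt]
    images_type = lvm
    volume_clear = zero
    volume_clear_size = 1024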

1081"""), 

1082] 

1083 

1084libvirt_utils_opts = [ 

1085 cfg.BoolOpt('snapshot_compression', 

1086 default=False, 

1087 help=""" 

1088Enable snapshot compression for ``qcow2`` images. 

1089 

1090Note: you can set ``snapshot_image_format`` to ``qcow2`` to force all 

1091snapshots to be in ``qcow2`` format, independently from their original image 

1092type. 

1093 

1094Related options: 

1095 

1096* snapshot_image_format 

1097"""), 

1098] 

1099 

1100libvirt_vif_opts = [ 

1101 cfg.BoolOpt('use_virtio_for_bridges', 

1102 default=True, 

1103 help='Use virtio for bridge interfaces with KVM/QEMU'), 

1104] 

1105 

1106libvirt_volume_opts = [ 

1107 cfg.BoolOpt('volume_use_multipath', 

1108 default=False, 

1109 deprecated_name='iscsi_use_multipath', 

1110 help=""" 

1111Use multipath connection of the iSCSI or FC volume 

1112 

1113Volumes can be connected in the LibVirt as multipath devices. This will 

1114provide high availability and fault tolerance. 

1115"""), 

1116 cfg.BoolOpt('volume_enforce_multipath', 

1117 default=False, 

1118 help=""" 

1119Require multipathd when attaching a volume to an instance. 

1120 

1121When enabled, attachment of volumes will be aborted when multipathd is not 

1122running. Otherwise, it will fallback to single path without error. 

1123 

1124When enabled, the libvirt driver checks availability of mulitpathd when it is 

1125initialized, and the compute service fails to start if multipathd is not 

1126running. 

1127 

1128Related options: 

1129 

1130* volume_use_multipath must be True when this is True 

1131"""), 

1132 cfg.IntOpt('num_volume_scan_tries', 

1133 deprecated_name='num_iscsi_scan_tries', 

1134 default=5, 

1135 help=""" 

1136Number of times to scan given storage protocol to find volume. 

1137"""), 

1138] 

1139 

1140libvirt_volume_aoe_opts = [ 

1141 cfg.IntOpt('num_aoe_discover_tries', 

1142 default=3, 

1143 help=""" 

1144Number of times to rediscover AoE target to find volume. 

1145 

1146Nova provides support for block storage attaching to hosts via AOE (ATA over 

1147Ethernet). This option allows the user to specify the maximum number of retry 

1148attempts that can be made to discover the AoE device. 

1149""") 

1150] 

1151 

1152libvirt_volume_iscsi_opts = [ 

1153 cfg.StrOpt('iscsi_iface', 

1154 deprecated_name='iscsi_transport', 

1155 help=""" 

1156The iSCSI transport iface to use to connect to target in case offload support 

1157is desired. 

1158 

1159Default format is of the form ``<transport_name>.<hwaddress>``, where 

1160``<transport_name>`` is one of (``be2iscsi``, ``bnx2i``, ``cxgb3i``, 

1161``cxgb4i``, ``qla4xxx``, ``ocs``, ``tcp``) and ``<hwaddress>`` is the MAC 

1162address of the interface and can be generated via the ``iscsiadm -m iface`` 

1163command. Do not confuse the ``iscsi_iface`` parameter to be provided here with 

1164the actual transport name. 

1165""") 

1166# iser is also supported, but use LibvirtISERVolumeDriver 

1167# instead 

1168] 

1169 

1170libvirt_volume_iser_opts = [ 

1171 cfg.IntOpt('num_iser_scan_tries', 

1172 default=5, 

1173 help=""" 

1174Number of times to scan iSER target to find volume. 

1175 

1176iSER is a server network protocol that extends iSCSI protocol to use Remote 

1177Direct Memory Access (RDMA). This option allows the user to specify the maximum 

1178number of scan attempts that can be made to find iSER volume. 

1179"""), 

1180 cfg.BoolOpt('iser_use_multipath', 

1181 default=False, 

1182 help=""" 

1183Use multipath connection of the iSER volume. 

1184 

1185iSER volumes can be connected as multipath devices. This will provide high 

1186availability and fault tolerance. 

1187""") 

1188] 

1189 

1190libvirt_volume_net_opts = [ 

1191 cfg.StrOpt('rbd_user', 

1192 help=""" 

1193The RADOS client name for accessing rbd(RADOS Block Devices) volumes. 

1194 

1195Libvirt will refer to this user when connecting and authenticating with 

1196the Ceph RBD server. 

1197"""), 

1198 cfg.StrOpt('rbd_secret_uuid', 

1199 help=""" 

1200The libvirt UUID of the secret for the rbd_user volumes. 

1201"""), 

1202 cfg.IntOpt('rbd_connect_timeout', 

1203 default=5, 

1204 help=""" 

1205The RADOS client timeout in seconds when initially connecting to the cluster. 

1206"""), 

1207 cfg.IntOpt('rbd_destroy_volume_retry_interval', 

1208 default=5, 

1209 min=0, 

1210 help=""" 

1211Number of seconds to wait between each consecutive retry to destroy a 

1212RBD volume. 

1213 

1214Related options: 

1215 

1216* [libvirt]/images_type = 'rbd' 

1217"""), 

1218 cfg.IntOpt('rbd_destroy_volume_retries', 

1219 default=12, 

1220 min=0, 

1221 help=""" 

1222Number of retries to destroy a RBD volume. 

1223 

1224Related options: 

1225 

1226* [libvirt]/images_type = 'rbd' 

1227"""), 

1228] 

1229 

1230libvirt_volume_nfs_opts = [ 

1231 cfg.StrOpt('nfs_mount_point_base', 

1232 default=paths.state_path_def('mnt'), 

1233 help=""" 

1234Directory where the NFS volume is mounted on the compute node. 

1235The default is 'mnt' directory of the location where nova's Python module 

1236is installed. 

1237 

1238NFS provides shared storage for the OpenStack Block Storage service. 

1239 

1240Possible values: 

1241 

1242* A string representing absolute path of mount point. 

1243"""), 

1244 cfg.StrOpt('nfs_mount_options', 

1245 help=""" 

1246Mount options passed to the NFS client. See section of the nfs man page 

1247for details. 

1248 

1249Mount options controls the way the filesystem is mounted and how the 

1250NFS client behaves when accessing files on this mount point. 

1251 

1252Possible values: 

1253 

1254* Any string representing mount options separated by commas. 

1255* Example string: vers=3,lookupcache=pos 

1256"""), 

1257] 

1258 

1259libvirt_volume_ceph_opts = [ 

1260 cfg.StrOpt('ceph_mount_point_base', 

1261 default=paths.state_path_def('mnt'), 

1262 help=""" 

1263Directory where the ceph volume for each manila share is mounted on the 

1264compute node. 

1265The default is 'mnt' directory of the location where nova's Python module 

1266is installed. 

1267 

1268Possible values: 

1269 

1270* A string representing absolute path of mount point. 

1271"""), 

1272 cfg.ListOpt('ceph_mount_options', 

1273 help=""" 

1274Mount options passed to the ceph client. See section of the ceph man page 

1275for details. 

1276 

1277Mount options controls the way the filesystem is mounted and how the 

1278ceph client behaves when accessing files on this mount point. 

1279 

1280Possible values: 

1281 

1282* Any string representing mount options separated by commas. 

1283* Example string: vers=3,lookupcache=pos 

1284"""), 

1285] 

1286 

1287libvirt_volume_quobyte_opts = [ 

1288 cfg.StrOpt('quobyte_mount_point_base', 

1289 default=paths.state_path_def('mnt'), 

1290 deprecated_for_removal=True, 

1291 deprecated_since="31.0.0", 

1292 deprecated_reason=""" 

1293Quobyte volume driver in cinder was marked unsupported. Quobyte volume support 

1294will be removed from nova when the volume driver is removed from cinder. 

1295""", 

1296 help=""" 

1297Directory where the Quobyte volume is mounted on the compute node. 

1298 

1299Nova supports Quobyte volume driver that enables storing Block Storage 

1300service volumes on a Quobyte storage back end. This Option specifies the 

1301path of the directory where Quobyte volume is mounted. 

1302 

1303Possible values: 

1304 

1305* A string representing absolute path of mount point. 

1306"""), 

1307 cfg.StrOpt('quobyte_client_cfg', 

1308 deprecated_for_removal=True, 

1309 deprecated_since="31.0.0", 

1310 deprecated_reason=""" 

1311Quobyte volume driver in cinder was marked unsupported. Quobyte volume support 

1312will be removed from nova when the volume driver is removed from cinder. 

1313""", 

1314 help='Path to a Quobyte Client configuration file.'), 

1315] 

1316 

1317libvirt_volume_smbfs_opts = [ 

1318 cfg.StrOpt('smbfs_mount_point_base', 

1319 default=paths.state_path_def('mnt'), 

1320 deprecated_for_removal=True, 

1321 deprecated_since="31.0.0", 

1322 deprecated_reason=""" 

1323Windows SMB volume driver in cinder was marked unsupported. SMBFS volume 

1324support will be removed from nova when the volume driver is removed from 

1325cinder. 

1326""", 

1327 help=""" 

1328Directory where the SMBFS shares are mounted on the compute node. 

1329"""), 

1330 cfg.StrOpt('smbfs_mount_options', 

1331 default='', 

1332 deprecated_for_removal=True, 

1333 deprecated_since="31.0.0", 

1334 deprecated_reason=""" 

1335Windows SMB volume driver in cinder was marked unsupported. SMBFS volume 

1336support will be removed from nova when the volume driver is removed from 

1337cinder. 

1338""", 

1339 help=""" 

1340Mount options passed to the SMBFS client. 

1341 

1342Provide SMBFS options as a single string containing all parameters. 

1343See mount.cifs man page for details. Note that the libvirt-qemu ``uid`` 

1344and ``gid`` must be specified. 

1345"""), 

1346] 

1347 

1348libvirt_remotefs_opts = [ 

1349 cfg.StrOpt('remote_filesystem_transport', 

1350 default='ssh', 

1351 choices=('ssh', 'rsync'), 

1352 help=""" 

1353libvirt's transport method for remote file operations. 

1354 

1355Because libvirt cannot use RPC to copy files over network to/from other 

1356compute nodes, other method must be used for: 

1357 

1358* creating directory on remote host 

1359* creating file on remote host 

1360* removing file from remote host 

1361* copying file to remote host 

1362""") 

1363] 

1364 

1365libvirt_volume_vzstorage_opts = [ 

1366 cfg.StrOpt('vzstorage_mount_point_base', 

1367 default=paths.state_path_def('mnt'), 

1368 deprecated_for_removal=True, 

1369 deprecated_since="31.0.0", 

1370 deprecated_reason=""" 

1371Virtuozzo Storage volume driver in cinder was marked unsupported. Virtuozzo 

1372Storage volume support will be removed from nova when the volume driver is 

1373removed from cinder. 

1374""", 

1375 help=""" 

1376Directory where the Virtuozzo Storage clusters are mounted on the compute 

1377node. 

1378 

1379This option defines non-standard mountpoint for Vzstorage cluster. 

1380 

1381Related options: 

1382 

1383* vzstorage_mount_* group of parameters 

1384""" 

1385 ), 

1386 cfg.StrOpt('vzstorage_mount_user', 

1387 default='stack', 

1388 deprecated_for_removal=True, 

1389 deprecated_since="31.0.0", 

1390 deprecated_reason=""" 

1391Virtuozzo Storage volume driver in cinder was marked unsupported. Virtuozzo 

1392Storage volume support will be removed from nova when the volume driver is 

1393removed from cinder. 

1394""", 

1395 help=""" 

1396Mount owner user name. 

1397 

1398This option defines the owner user of Vzstorage cluster mountpoint. 

1399 

1400Related options: 

1401 

1402* vzstorage_mount_* group of parameters 

1403""" 

1404 ), 

1405 cfg.StrOpt('vzstorage_mount_group', 

1406 default='qemu', 

1407 help=""" 

1408Mount owner group name. 

1409 

1410This option defines the owner group of Vzstorage cluster mountpoint. 

1411 

1412Related options: 

1413 

1414* vzstorage_mount_* group of parameters 

1415""" 

1416 ), 

1417 cfg.StrOpt('vzstorage_mount_perms', 

1418 default='0770', 

1419 help=""" 

1420Mount access mode. 

1421 

1422This option defines the access bits of the Vzstorage cluster mountpoint, 

1423in a format similar to that of the chmod(1) utility, for example: 0770. 

1424It consists of one to four digits ranging from 0 to 7, with missing 

1425leading digits assumed to be 0s. 

1426 

1427Related options: 

1428 

1429* vzstorage_mount_* group of parameters 

1430""" 

1431 ), 

1432 cfg.StrOpt('vzstorage_log_path', 

1433 default='/var/log/vstorage/%(cluster_name)s/nova.log.gz', 

1434 help=""" 

1435Path to vzstorage client log. 

1436 

1437This option defines the log file for cluster operations; 

1438it should include the "%(cluster_name)s" template to separate 

1439logs from multiple shares. 

1440 

1441Related options: 

1442 

1443* vzstorage_mount_opts may include more detailed logging options. 

1444""" 

1445 ), 

1446 cfg.StrOpt('vzstorage_cache_path', 

1447 default=None, 

1448 help=""" 

1449Path to the SSD cache file. 

1450 

1451You can attach an SSD drive to a client and configure the drive to store 

1452a local cache of frequently accessed data. By having a local cache on a 

1453client's SSD drive, you can increase the overall cluster performance by 

1454a factor of 10 or more. 

1455WARNING! Many SSD models are not server grade and may lose an arbitrary 

1456set of data changes on power loss. Such SSDs should not be used with 

1457Vstorage and are dangerous, as they may lead to data corruption and 

1458inconsistencies. Please consult the manual for SSD models that are known 

1459to be safe, or verify a model using the vstorage-hwflush-check(1) 

1460utility. 

1461 

1462This option defines the path, which should include the "%(cluster_name)s" 

1463template to separate caches from multiple shares. 

1464 

1465Related options: 

1466 

1467* vzstorage_mount_opts may include more detailed cache options. 

1468""" 

1469 ), 

1470 cfg.ListOpt('vzstorage_mount_opts', 

1471 default=[], 

1472 help=""" 

1473Extra mount options for pstorage-mount. 

1474 

1475For a full description of them, see 

1476https://static.openvz.org/vz-man/man1/pstorage-mount.1.gz.html 

1477The format is a Python string representation of an argument list, like: 

1478"['-v', '-R', '500']" 

1479It should not include -c, -l, -C, -u, -g or -m, as those are set via 

1480explicit vzstorage_* options. 

1481 

1482Related options: 

1483 

1484* All other vzstorage_* options 

1485""" 

1486), 

1487] 
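
# A hedged sketch of how the vzstorage_* options above fit together on a
# pstorage-mount command line (illustrative only; nova's real mount code
# differs). Note how -c/-l/-u/-g/-m are covered by explicit options, which
# is why vzstorage_mount_opts must not repeat them:
def _build_pstorage_mount_cmd(conf, cluster_name):
    """Assemble a pstorage-mount command from [libvirt] vzstorage options."""
    log_path = conf.vzstorage_log_path % {'cluster_name': cluster_name}
    mount_point = '%s/%s' % (conf.vzstorage_mount_point_base, cluster_name)
    cmd = ['pstorage-mount',
           '-c', cluster_name,
           '-l', log_path,
           '-u', conf.vzstorage_mount_user,
           '-g', conf.vzstorage_mount_group,
           '-m', conf.vzstorage_mount_perms]
    cmd.extend(conf.vzstorage_mount_opts)  # extra flags, e.g. ['-v']
    cmd.append(mount_point)
    return cmd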

1488 

1489 

1490# The queue size value must be a power of two in the 

1491# [256, 1024] range. 

1492# https://libvirt.org/formatdomain.html#elementsDriverBackendOptions 

1493QueueSizeType = types.Integer(choices=(256, 512, 1024)) 
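
# oslo.config types are callable validators, so the type above can be
# exercised directly (an illustrative check, not part of nova):
#
#   >>> QueueSizeType('512')
#   512
#   >>> QueueSizeType(300)      # not one of the allowed choices
#   ValueError: ...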

1494 

1495libvirt_virtio_queue_sizes = [ 

1496 cfg.Opt('rx_queue_size', 

1497 type=QueueSizeType, 

1498 help=""" 

1499Configure virtio rx queue size. 

1500 

1501This option is only usable for the virtio-net device with the vhost and 

1502vhost-user backends. Available only with QEMU/KVM. Requires libvirt 

1503v2.3 and QEMU v2.7."""), 

1504 cfg.Opt('tx_queue_size', 

1505 type=QueueSizeType, 

1506 help=""" 

1507Configure virtio tx queue size. 

1508 

1509This option is only usable for the virtio-net device with the vhost-user 

1510backend. Available only with QEMU/KVM. Requires libvirt v3.7 and 

1511QEMU v2.10."""), 

1512 cfg.IntOpt('max_queues', default=None, min=1, help=""" 

1513The maximum number of virtio queue pairs that can be enabled 

1514when creating a multiqueue guest. The number of virtio queues 

1515allocated will be the lesser of the number of CPUs requested by 

1516the guest and the maximum value defined here. By default, this 

1517value is None, meaning the legacy limits based on the reported 

1518kernel major version will be used. 

1519"""), 

1520 

1521] 
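
# A minimal sketch of the max_queues semantics described above (the
# function name and the legacy fallback argument are illustrative):
def _effective_virtio_queues(guest_vcpus, max_queues, legacy_limit):
    """Return the number of virtio queue pairs to allocate for a guest."""
    if max_queues is None:
        # Fall back to the legacy, kernel-version-based limit.
        return min(guest_vcpus, legacy_limit)
    return min(guest_vcpus, max_queues)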

1522 

1523 

1524libvirt_volume_nvmeof_opts = [ 

1525 cfg.IntOpt('num_nvme_discover_tries', 

1526 default=5, 

1527 help=""" 

1528Number of times to rediscover the NVMe target to find the volume. 

1529 

1530Nova provides support for attaching block storage to hosts via NVMe 

1531(Non-Volatile Memory Express). This option allows the user to specify the 

1532maximum number of retry attempts that can be made to discover the NVMe device. 

1533"""), 

1534] 
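
# A hedged sketch of the retry behaviour num_nvme_discover_tries controls;
# ``discover`` stands in for the real NVMe discovery call, and the delay
# between attempts is illustrative:
import time

def _find_nvme_device(discover, tries, delay=2.0):
    """Retry NVMe target discovery up to ``tries`` times."""
    for _ in range(tries):
        device = discover()
        if device is not None:
            return device
        time.sleep(delay)  # give the target time to appear
    raise RuntimeError('NVMe device not found after %d tries' % tries)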

1535 

1536 

1537libvirt_pmem_opts = [ 

1538 cfg.ListOpt('pmem_namespaces', 

1539 item_type=cfg.types.String(), 

1540 default=[], 

1541 help=""" 

1542Configure persistent memory (pmem) namespaces. 

1543 

1544These namespaces must have been already created on the host. This config 

1545option is in the following format:: 

1546 

1547 "$LABEL:$NSNAME[|$NSNAME][,$LABEL:$NSNAME[|$NSNAME]]" 

1548 

1549* ``$NSNAME`` is the name of the pmem namespace. 

1550* ``$LABEL`` represents one resource class; it is used to generate 

1551 the resource class name as ``CUSTOM_PMEM_NAMESPACE_$LABEL``. 

1552 

1553For example:: 

1554 

1555 [libvirt] 

1556 pmem_namespaces=128G:ns0|ns1|ns2|ns3,262144MB:ns4|ns5,MEDIUM:ns6|ns7 

1557 

1558"""), 

1559] 
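
# A small, self-contained parser for the pmem_namespaces format documented
# above (illustrative; nova's real parsing may differ). ListOpt already
# splits the value on commas, so each entry is '$LABEL:$NSNAME[|$NSNAME]':
def _parse_pmem_namespaces(entries):
    """Map pmem_namespaces entries to {label: [namespace, ...]}."""
    mapping = {}
    for entry in entries:
        label, names = entry.split(':', 1)
        mapping[label] = names.split('|')
    return mapping

# _parse_pmem_namespaces(['128G:ns0|ns1', 'MEDIUM:ns6|ns7'])
# -> {'128G': ['ns0', 'ns1'], 'MEDIUM': ['ns6', 'ns7']}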

1560 

1561 

1562libvirt_vtpm_opts = [ 

1563 cfg.BoolOpt('swtpm_enabled', 

1564 default=False, 

1565 help=""" 

1566Enable emulated TPM (Trusted Platform Module) in guests. 

1567"""), 

1568 cfg.StrOpt('swtpm_user', 

1569 default='tss', 

1570 help=""" 

1571User that swtpm binary runs as. 

1572 

1573When using emulated TPM, the ``swtpm`` binary will run to emulate a TPM 

1574device. The user this binary runs as depends on libvirt configuration, with 

1575``tss`` being the default. 

1576 

1577In order to support cold migration and resize, nova needs to know what user 

1578the swtpm binary is running as in order to ensure that files get the proper 

1579ownership after being moved between nodes. 

1580 

1581Related options: 

1582 

1583* ``swtpm_group`` must also be set. 

1584"""), 

1585 cfg.StrOpt('swtpm_group', 

1586 default='tss', 

1587 help=""" 

1588Group that swtpm binary runs as. 

1589 

1590When using emulated TPM, the ``swtpm`` binary will run to emulate a TPM 

1591device. The group this binary runs as depends on libvirt configuration, with 

1592``tss`` being the default. 

1593 

1594In order to support cold migration and resize, nova needs to know what group 

1595the swtpm binary is running as in order to ensure that files get the proper 

1596ownership after being moved between nodes. 

1597 

1598Related options: 

1599 

1600* ``swtpm_user`` must also be set. 

1601"""), 

1602] 
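
# A minimal sketch of why swtpm_user and swtpm_group matter for cold
# migration and resize: copied vTPM state must be re-owned for the swtpm
# process (the helper and path argument are illustrative, not nova code):
import shutil

def _restore_vtpm_ownership(conf, tpm_state_dir):
    """Re-own migrated vTPM state files for the swtpm binary."""
    shutil.chown(tpm_state_dir,
                 user=conf.libvirt.swtpm_user,
                 group=conf.libvirt.swtpm_group)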

1603 

1604libvirt_cpu_mgmt_opts = [ 

1605 cfg.BoolOpt('cpu_power_management', 

1606 default=False, 

1607 help='Use libvirt to manage CPU cores performance.'), 

1608 cfg.StrOpt('cpu_power_management_strategy', 

1609 choices=['cpu_state', 'governor'], 

1610 default='cpu_state', 

1611               help='Tuning strategy to reduce the power consumption of ' 

1612                    'unused CPU cores'), 

1613 cfg.StrOpt('cpu_power_governor_low', 

1614 default='powersave', 

1615 help='Governor to use in order ' 

1616 'to reduce CPU power consumption'), 

1617 cfg.StrOpt('cpu_power_governor_high', 

1618 default='performance', 

1619 help='Governor to use in order to have best CPU performance'), 

1620] 
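
# A hedged sketch of the 'governor' strategy above: on Linux, a core's
# cpufreq governor is set through sysfs (a standard kernel interface;
# nova's own implementation may differ):
def _set_core_governor(core_id, governor):
    """Write a cpufreq governor for one CPU core via sysfs."""
    path = ('/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor'
            % core_id)
    with open(path, 'w') as f:
        f.write(governor)

# e.g. _set_core_governor(4, 'powersave') when core 4 is unused, and
# _set_core_governor(4, 'performance') when a pinned vCPU starts using it.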

1621 

1622ALL_OPTS = list(itertools.chain( 

1623 libvirt_general_opts, 

1624 libvirt_imagebackend_opts, 

1625 libvirt_lvm_opts, 

1626 libvirt_utils_opts, 

1627 libvirt_vif_opts, 

1628 libvirt_volume_opts, 

1629 libvirt_volume_aoe_opts, 

1630 libvirt_volume_iscsi_opts, 

1631 libvirt_volume_iser_opts, 

1632 libvirt_volume_net_opts, 

1633 libvirt_volume_nfs_opts, 

1634 libvirt_volume_ceph_opts, 

1635 libvirt_volume_quobyte_opts, 

1636 libvirt_volume_smbfs_opts, 

1637 libvirt_remotefs_opts, 

1638 libvirt_volume_vzstorage_opts, 

1639 libvirt_virtio_queue_sizes, 

1640 libvirt_volume_nvmeof_opts, 

1641 libvirt_pmem_opts, 

1642 libvirt_vtpm_opts, 

1643 libvirt_cpu_mgmt_opts, 

1644)) 

1645 

1646 

1647def register_opts(conf): 

1648 conf.register_group(libvirt_group) 

1649 conf.register_opts(ALL_OPTS, group=libvirt_group) 

1650 

1651 

1652def list_opts(): 

1653 return {libvirt_group: ALL_OPTS}
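
# A short usage sketch for the helpers above (illustrative; in nova the
# registration is driven centrally through nova.conf):
def _example_usage():
    conf = cfg.ConfigOpts()
    register_opts(conf)
    conf([])  # parse an empty argv so defaults take effect
    return conf.libvirt.swtpm_user  # -> 'tss'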