Coverage for nova/conf/compute.py: 95%
22 statements
« prev ^ index » next coverage.py v7.6.12, created at 2025-04-24 11:16 +0000
# needs:check_deprecation_status

# Copyright 2015 Huawei Technology corp.
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Imports reconstructed from the garbled dump (fused line-number prefixes
# stripped): stdlib, then third-party oslo.config, then nova-local, per the
# file's grouping convention.
import socket

from oslo_config import cfg
from oslo_config import types

from nova.conf import paths
# Option group holding all [compute]-section options defined in this module.
compute_group = cfg.OptGroup(
    'compute',
    title='Compute Manager Options',
    help="""
A collection of options specific to the nova-compute service.
""")
# [DEFAULT]-section options consumed by the compute service (driver choice,
# resize/snapshot behavior, VIF plugging, image handling, input devices, ...).
compute_opts = [
    cfg.StrOpt('compute_driver',
        help="""
Defines which driver to use for controlling virtualization.

Possible values:

* ``libvirt.LibvirtDriver``
* ``fake.FakeDriver``
* ``ironic.IronicDriver``
* ``vmwareapi.VMwareVCDriver``
* ``zvm.ZVMDriver``
"""),
    cfg.BoolOpt('allow_resize_to_same_host',
        default=False,
        help="""
Allow destination machine to match source for resize. Useful when
testing in single-host environments. By default it is not allowed
to resize to the same host. Setting this option to true will add
the same host to the destination options. Also set to true
if you allow the ServerGroupAffinityFilter and need to resize. For changes to
this option to take effect, the nova-api service needs to be restarted.
"""),
    cfg.ListOpt('non_inheritable_image_properties',
        default=['cache_in_nova', 'bittorrent'],
        help="""
Image properties that should not be inherited from the instance
when taking a snapshot.

This option gives an opportunity to select which image-properties
should not be inherited by newly created snapshots.

.. note::

   The following image properties are *never* inherited regardless of
   whether they are listed in this configuration option or not:

   * cinder_encryption_key_id
   * cinder_encryption_key_deletion_policy
   * img_signature
   * img_signature_hash_method
   * img_signature_key_type
   * img_signature_certificate_uuid

Possible values:

* A comma-separated list whose item is an image property. Usually only
  the image properties that are only needed by base images can be included
  here, since the snapshots that are created from the base images don't
  need them.
* Default list: cache_in_nova, bittorrent

"""),
    cfg.IntOpt('max_local_block_devices',
        default=3,
        help="""
Maximum number of devices that will result in a local image being
created on the hypervisor node.

A negative number means unlimited. Setting ``max_local_block_devices``
to 0 means that any request that attempts to create a local disk
will fail. This option is meant to limit the number of local discs
(so root local disc that is the result of ``imageRef`` being used when
creating a server, and any other ephemeral and swap disks). 0 does not
mean that images will be automatically converted to volumes and boot
instances from volumes - it just means that all requests that attempt
to create a local disk will fail.

Possible values:

* 0: Creating a local disk is not allowed.
* Negative number: Allows unlimited number of local discs.
* Positive number: Allows only these many number of local discs.
"""),
    cfg.ListOpt('compute_monitors',
        default=[],
        help="""
A comma-separated list of monitors that can be used for getting
compute metrics. You can use the alias/name from the setuptools
entry points for nova.compute.monitors.* namespaces. If no
namespace is supplied, the "cpu." namespace is assumed for
backwards-compatibility.

NOTE: Only one monitor per namespace (For example: cpu) can be loaded at
a time.

Possible values:

* An empty list will disable the feature (Default).
* An example value that would enable the CPU
  bandwidth monitor that uses the virt driver variant::

    compute_monitors = cpu.virt_driver
"""),
    cfg.StrOpt('default_ephemeral_format',
        help="""
The default format an ephemeral_volume will be formatted with on creation.

Possible values:

* ``ext2``
* ``ext3``
* ``ext4``
* ``xfs``
* ``ntfs`` (only for Windows guests)
"""),
    cfg.BoolOpt('vif_plugging_is_fatal',
        default=True,
        help="""
Determine if instance should boot or fail on VIF plugging timeout.

Nova sends a port update to Neutron after an instance has been scheduled,
providing Neutron with the necessary information to finish setup of the port.
Once completed, Neutron notifies Nova that it has finished setting up the
port, at which point Nova resumes the boot of the instance since network
connectivity is now supposed to be present. A timeout will occur if the reply
is not received after a given interval.

This option determines what Nova does when the VIF plugging timeout event
happens. When enabled, the instance will error out. When disabled, the
instance will continue to boot on the assumption that the port is ready.

Possible values:

* True: Instances should fail after VIF plugging timeout
* False: Instances should continue booting after VIF plugging timeout
"""),
    cfg.IntOpt('vif_plugging_timeout',
        default=300,
        min=0,
        help="""
Timeout for Neutron VIF plugging event message arrival.

Number of seconds to wait for Neutron vif plugging events to
arrive before continuing or failing (see 'vif_plugging_is_fatal').

If you are hitting timeout failures at scale, consider running rootwrap
in "daemon mode" in the neutron agent via the ``[agent]/root_helper_daemon``
neutron configuration option.

Related options:

* vif_plugging_is_fatal - If ``vif_plugging_timeout`` is set to zero and
  ``vif_plugging_is_fatal`` is False, events should not be expected to
  arrive at all.
"""),
    cfg.IntOpt('arq_binding_timeout',
        default=300,
        min=1,
        help="""
Timeout for Accelerator Request (ARQ) bind event message arrival.

Number of seconds to wait for ARQ bind resolution event to arrive.
The event indicates that every ARQ for an instance has either bound
successfully or failed to bind. If it does not arrive, instance bringup
is aborted with an exception.
"""),
    cfg.StrOpt('injected_network_template',
        default=paths.basedir_def('nova/virt/interfaces.template'),
        help="""Path to '/etc/network/interfaces' template.

The path to a template file for the '/etc/network/interfaces'-style file, which
will be populated by nova and subsequently used by cloudinit. This provides a
method to configure network connectivity in environments without a DHCP server.

The template will be rendered using Jinja2 template engine, and receive a
top-level key called ``interfaces``. This key will contain a list of
dictionaries, one for each interface.

Refer to the cloudinit documentation for more information:

    https://cloudinit.readthedocs.io/en/latest/topics/datasources.html

Possible values:

* A path to a Jinja2-formatted template for a Debian '/etc/network/interfaces'
  file. This applies even if using a non Debian-derived guest.

Related options:

* ``flat_inject``: This must be set to ``True`` to ensure nova embeds network
  configuration information in the metadata provided through the config drive.
"""),
    cfg.StrOpt('preallocate_images',
        default='none',
        choices=[
            ('none', 'No storage provisioning is done up front'),
            ('space', 'Storage is fully allocated at instance start')
        ],
        help="""
The image preallocation mode to use.

Image preallocation allows storage for instance images to be allocated up front
when the instance is initially provisioned. This ensures immediate feedback is
given if enough space isn't available. In addition, it should significantly
improve performance on writes to new blocks and may even improve I/O
performance to prewritten blocks due to reduced fragmentation.
"""),
    cfg.BoolOpt('use_cow_images',
        default=True,
        help="""
Enable use of copy-on-write (cow) images.

QEMU/KVM allow the use of qcow2 as backing files. By disabling this,
backing files will not be used.
"""),
    cfg.BoolOpt('force_raw_images',
        default=True,
        help="""
Force conversion of backing images to raw format.

Possible values:

* True: Backing image files will be converted to raw image format
* False: Backing image files will not be converted

Related options:

* ``compute_driver``: Only the libvirt driver uses this option.
* ``[libvirt]/images_type``: If images_type is rbd, setting this option
  to False is not allowed. See the bug
  https://bugs.launchpad.net/nova/+bug/1816686 for more details.
"""),
# NOTE(yamahata): ListOpt won't work because the command may include a comma.
# For example:
#
# mkfs.ext4 -O dir_index,extent -E stride=8,stripe-width=16
# --label %(fs_label)s %(target)s
#
# list arguments are comma separated and there is no way to escape such
# commas.
    cfg.MultiStrOpt('virt_mkfs',
        default=[],
        help="""
Name of the mkfs commands for ephemeral device.

The format is <os_type>=<mkfs command>
"""),
    cfg.BoolOpt('resize_fs_using_block_device',
        default=False,
        help="""
Enable resizing of filesystems via a block device.

If enabled, attempt to resize the filesystem by accessing the image over a
block device. This is done by the host and may not be necessary if the image
contains a recent version of cloud-init. Possible mechanisms require the nbd
driver (for qcow and raw), or loop (for raw).
"""),
    cfg.IntOpt('timeout_nbd',
        default=10,
        min=0,
        help='Amount of time, in seconds, to wait for NBD device start up.'),
    cfg.StrOpt('pointer_model',
        default='usbtablet',
        choices=[
            ('ps2mouse', 'Uses relative movement. Mouse connected by PS2'),
            ('usbtablet', 'Uses absolute movement. Tablet connect by USB'),
            (None, 'Uses default behavior provided by drivers (mouse on PS2 '
                   'for libvirt x86)'),
        ],
        help="""
Generic property to specify the pointer type.

Input devices allow interaction with a graphical framebuffer. For
example to provide a graphic tablet for absolute cursor movement.

If set, either the ``hw_input_bus`` or ``hw_pointer_model`` image metadata
properties will take precedence over this configuration option.

Related options:

* usbtablet must be configured with VNC enabled or SPICE enabled and SPICE
  agent disabled. When used with libvirt the instance mode should be
  configured as HVM.
 """),
    cfg.IntOpt('reimage_timeout_per_gb',
        default=20,
        min=1,
        help="""
Timeout for reimaging a volume.

Number of seconds to wait for volume-reimaged events to arrive before
continuing or failing.

This is a per gigabyte time which has a default value of 20 seconds and
will be multiplied by the GB size of image. Eg: an image of 6 GB will have
a timeout of 20 * 6 = 120 seconds.
Try increasing the timeout if the image copy per GB takes more time and you
are hitting timeout failures.
"""),
]
# [DEFAULT]-section options consumed by the compute resource tracker
# (CPU pinning mask, reserved host disk/memory/CPU, huge pages).
resource_tracker_opts = [
    cfg.StrOpt('vcpu_pin_set',
        deprecated_for_removal=True,
        deprecated_since='20.0.0',
        deprecated_reason="""
This option has been superseded by the ``[compute] cpu_dedicated_set`` and
``[compute] cpu_shared_set`` options, which allow things like the co-existence
of pinned and unpinned instances on the same host (for the libvirt driver).
""",
        help="""
Mask of host CPUs that can be used for ``VCPU`` resources.

The behavior of this option depends on the definition of the ``[compute]
cpu_dedicated_set`` option and affects the behavior of the ``[compute]
cpu_shared_set`` option.

* If ``[compute] cpu_dedicated_set`` is defined, defining this option will
  result in an error.

* If ``[compute] cpu_dedicated_set`` is not defined, this option will be used
  to determine inventory for ``VCPU`` resources and to limit the host CPUs
  that both pinned and unpinned instances can be scheduled to, overriding the
  ``[compute] cpu_shared_set`` option.

Possible values:

* A comma-separated list of physical CPU numbers that virtual CPUs can be
  allocated from. Each element should be either a single CPU number, a range of
  CPU numbers, or a caret followed by a CPU number to be excluded from a
  previous range. For example::

    vcpu_pin_set = "4-12,^8,15"

Related options:

* ``[compute] cpu_dedicated_set``
* ``[compute] cpu_shared_set``
"""),
    cfg.MultiOpt('reserved_huge_pages',
        item_type=types.Dict(),
        help="""
Number of huge/large memory pages to reserve per NUMA host cell.

Possible values:

* A list of valid key=value which reflect NUMA node ID, page size
  (Default unit is KiB) and number of pages to be reserved. For example::

    reserved_huge_pages = node:0,size:2048,count:64
    reserved_huge_pages = node:1,size:1GB,count:1

  In this example we are reserving on NUMA node 0 64 pages of 2MiB
  and on NUMA node 1 1 page of 1GiB.
"""),
    cfg.IntOpt('reserved_host_disk_mb',
        min=0,
        default=0,
        help="""
Amount of disk resources in MB to make them always available to host. The
disk usage gets reported back to the scheduler from nova-compute running
on the compute nodes. To prevent the disk resources from being considered
as available, this option can be used to reserve disk space for that host.

Possible values:

* Any positive integer representing amount of disk in MB to reserve
  for the host.
"""),
    cfg.IntOpt('reserved_host_memory_mb',
        default=512,
        min=0,
        help="""
Amount of memory in MB to reserve for the host so that it is always available
to host processes. The host resources usage is reported back to the scheduler
continuously from nova-compute running on the compute node. To prevent the host
memory from being considered as available, this option is used to reserve
memory for the host.

Possible values:

* Any positive integer representing amount of memory in MB to reserve
  for the host.
"""),
    cfg.IntOpt('reserved_host_cpus',
        default=0,
        min=0,
        help="""
Number of host CPUs to reserve for host processes.

The host resources usage is reported back to the scheduler continuously from
nova-compute running on the compute node. This value is used to determine the
``reserved`` value reported to placement.

This option cannot be set if the ``[compute] cpu_shared_set`` or ``[compute]
cpu_dedicated_set`` config options have been defined. When these options are
defined, any host CPUs not included in these values are considered reserved for
the host.

Possible values:

* Any positive integer representing number of physical CPUs to reserve
  for the host.

Related options:

* ``[compute] cpu_shared_set``
* ``[compute] cpu_dedicated_set``
"""),
]
# [DEFAULT]-section CPU/RAM/disk overcommit ratios reported to placement,
# plus the ``initial_*`` variants used only when first creating the
# compute_nodes record.
allocation_ratio_opts = [
    cfg.FloatOpt('cpu_allocation_ratio',
        default=None,
        min=0.0,
        help="""
Virtual CPU to physical CPU allocation ratio.

This option is used to influence the hosts selected by the Placement API by
configuring the allocation ratio for ``VCPU`` inventory.

.. note::

   This option does not affect ``PCPU`` inventory, which cannot be
   overcommitted.

.. note::

   If this option is set to something *other than* ``None`` or ``0.0``, the
   allocation ratio will be overwritten by the value of this option, otherwise,
   the allocation ratio will not change. Once set to a non-default value, it is
   not possible to "unset" the config to get back to the default behavior. If
   you want to reset back to the initial value, explicitly specify it to the
   value of ``initial_cpu_allocation_ratio``.

Possible values:

* Any valid positive integer or float value

Related options:

* ``initial_cpu_allocation_ratio``
"""),
    cfg.FloatOpt('ram_allocation_ratio',
        default=None,
        min=0.0,
        help="""
Virtual RAM to physical RAM allocation ratio.

This option is used to influence the hosts selected by the Placement API by
configuring the allocation ratio for ``MEMORY_MB`` inventory.

.. note::

   If this option is set to something *other than* ``None`` or ``0.0``, the
   allocation ratio will be overwritten by the value of this option, otherwise,
   the allocation ratio will not change. Once set to a non-default value, it is
   not possible to "unset" the config to get back to the default behavior. If
   you want to reset back to the initial value, explicitly specify it to the
   value of ``initial_ram_allocation_ratio``.

Possible values:

* Any valid positive integer or float value

Related options:

* ``initial_ram_allocation_ratio``
"""),
    cfg.FloatOpt('disk_allocation_ratio',
        default=None,
        min=0.0,
        help="""
Virtual disk to physical disk allocation ratio.

This option is used to influence the hosts selected by the Placement API by
configuring the allocation ratio for ``DISK_GB`` inventory.

When configured, a ratio greater than 1.0 will result in over-subscription of
the available physical disk, which can be useful for more efficiently packing
instances created with images that do not use the entire virtual disk, such as
sparse or compressed images. It can be set to a value between 0.0 and 1.0 in
order to preserve a percentage of the disk for uses other than instances.

.. note::

   If the value is set to ``>1``, we recommend keeping track of the free disk
   space, as the value approaching ``0`` may result in the incorrect
   functioning of instances using it at the moment.

.. note::

   If this option is set to something *other than* ``None`` or ``0.0``, the
   allocation ratio will be overwritten by the value of this option, otherwise,
   the allocation ratio will not change. Once set to a non-default value, it is
   not possible to "unset" the config to get back to the default behavior. If
   you want to reset back to the initial value, explicitly specify it to the
   value of ``initial_disk_allocation_ratio``.

Possible values:

* Any valid positive integer or float value

Related options:

* ``initial_disk_allocation_ratio``
"""),
    cfg.FloatOpt('initial_cpu_allocation_ratio',
        default=4.0,
        min=0.0,
        help="""
Initial virtual CPU to physical CPU allocation ratio.

This is only used when initially creating the ``computes_nodes`` table record
for a given nova-compute service.

See https://docs.openstack.org/nova/latest/admin/configuration/schedulers.html
for more details and usage scenarios.

Related options:

* ``cpu_allocation_ratio``
"""),
    cfg.FloatOpt('initial_ram_allocation_ratio',
        default=1.0,
        min=0.0,
        help="""
Initial virtual RAM to physical RAM allocation ratio.

This is only used when initially creating the ``computes_nodes`` table record
for a given nova-compute service.

See https://docs.openstack.org/nova/latest/admin/configuration/schedulers.html
for more details and usage scenarios.

Related options:

* ``ram_allocation_ratio``
"""),
    cfg.FloatOpt('initial_disk_allocation_ratio',
        default=1.0,
        min=0.0,
        help="""
Initial virtual disk to physical disk allocation ratio.

This is only used when initially creating the ``computes_nodes`` table record
for a given nova-compute service.

See https://docs.openstack.org/nova/latest/admin/configuration/schedulers.html
for more details and usage scenarios.

Related options:

* ``disk_allocation_ratio``
""")
]
# [DEFAULT]-section options for the compute manager: host identity, instance
# storage paths, retry counts, and concurrency limits.
compute_manager_opts = [
    cfg.StrOpt('console_host',
        default=socket.gethostname(),
        sample_default="<current_hostname>",
        help="""
Console proxy host to be used to connect to instances on this host. It is the
publicly visible name for the console host.

Possible values:

* Current hostname (default) or any string representing hostname.
"""),
    cfg.StrOpt('default_access_ip_network_name',
        help="""
Name of the network to be used to set access IPs for instances. If there are
multiple IPs to choose from, an arbitrary one will be chosen.

Possible values:

* None (default)
* Any string representing network name.
"""),
    cfg.StrOpt('instances_path',
        default=paths.state_path_def('instances'),
        sample_default="$state_path/instances",
        help="""
Specifies where instances are stored on the hypervisor's disk.
It can point to locally attached storage or a directory on NFS.

Possible values:

* $state_path/instances where state_path is a config option that specifies
  the top-level directory for maintaining nova's state. (default) or
  Any string representing directory path.

Related options:

* ``[workarounds]/ensure_libvirt_rbd_instance_dir_cleanup``
"""),
    cfg.BoolOpt('instance_usage_audit',
        default=False,
        help="""
This option enables periodic compute.instance.exists notifications. Each
compute node must be configured to generate system usage data. These
notifications are consumed by OpenStack Telemetry service.
"""),
    cfg.IntOpt('live_migration_retry_count',
        default=30,
        min=0,
        advanced=True,
        help="""
This option controls the maximum number of attempts to plug VIFs on the
destination host. Historically this could fail due to rate limiting in
iptables. Operators should not need to modify this value from its default.

Possible values:

* Any positive integer representing retry count.
"""),
    cfg.BoolOpt('resume_guests_state_on_host_boot',
        default=False,
        help="""
This option specifies whether to start guests that were running before the
host rebooted. It ensures that all of the instances on a Nova compute node
resume their state each time the compute node boots or restarts.
"""),
    cfg.IntOpt('network_allocate_retries',
        default=0,
        min=0,
        help="""
Number of times to retry network allocation. It is required to attempt network
allocation retries if the virtual interface plug fails.

Possible values:

* Any positive integer representing retry count.
"""),
    cfg.IntOpt('max_concurrent_builds',
        default=10,
        min=0,
        help="""
Limits the maximum number of instance builds to run concurrently by
nova-compute. Compute service can attempt to build an infinite number of
instances, if asked to do so. This limit is enforced to avoid building
unlimited instance concurrently on a compute node. This value can be set
per compute node.

Possible Values:

* 0 : treated as unlimited.
* Any positive integer representing maximum concurrent builds.
"""),
    cfg.IntOpt('max_concurrent_snapshots',
        default=5,
        min=0,
        help="""
Maximum number of instance snapshot operations to run concurrently.
This limit is enforced to prevent snapshots overwhelming the
host/network/storage and causing failure. This value can be set per
compute node.

Possible Values:

* 0 : treated as unlimited.
* Any positive integer representing maximum concurrent snapshots.
"""),
    cfg.IntOpt('max_concurrent_live_migrations',
        default=1,
        min=0,
        help="""
Maximum number of live migrations to run concurrently. This limit is enforced
to avoid outbound live migrations overwhelming the host/network and causing
failures. It is not recommended that you change this unless you are very sure
that doing so is safe and stable in your environment.

Possible values:

* 0 : treated as unlimited.
* Any positive integer representing maximum number of live migrations
  to run concurrently.
"""),
    cfg.IntOpt('block_device_allocate_retries',
        default=60,
        min=0,
        help="""
The number of times to check for a volume to be "available" before attaching
it during server create.

When creating a server with block device mappings where ``source_type`` is
one of ``blank``, ``image`` or ``snapshot`` and the ``destination_type`` is
``volume``, the ``nova-compute`` service will create a volume and then attach
it to the server. Before the volume can be attached, it must be in status
"available". This option controls how many times to check for the created
volume to be "available" before it is attached.

If the operation times out, the volume will be deleted if the block device
mapping ``delete_on_termination`` value is True.

It is recommended to configure the image cache in the block storage service
to speed up this operation. See
https://docs.openstack.org/cinder/latest/admin/blockstorage-image-volume-cache.html
for details.

Possible values:

* 60 (default)
* If value is 0, then one attempt is made.
* For any value > 0, total attempts are (value + 1)

Related options:

* ``block_device_allocate_retries_interval`` - controls the interval between
  checks
"""),
    cfg.IntOpt('sync_power_state_pool_size',
        default=1000,
        help="""
Number of greenthreads available for use to sync power states.

This option can be used to reduce the number of concurrent requests
made to the hypervisor or system with real instance power states
for performance reasons, for example, with Ironic.

Possible values:

* Any positive integer representing greenthreads count.
""")
]
749compute_group_opts = [
750 cfg.IntOpt('consecutive_build_service_disable_threshold',
751 default=10,
752 help="""
753Enables reporting of build failures to the scheduler.
755Any nonzero value will enable sending build failure statistics to the
756scheduler for use by the BuildFailureWeigher.
758Possible values:
760* Any positive integer enables reporting build failures.
761* Zero to disable reporting build failures.
763Related options:
765* [filter_scheduler]/build_failure_weight_multiplier
767"""),
768 cfg.IntOpt("shutdown_retry_interval",
769 default=10,
770 min=1,
771 help="""
772Time to wait in seconds before resending an ACPI shutdown signal to
773instances.
775The overall time to wait is set by ``shutdown_timeout``.
777Possible values:
779* Any integer greater than 0 in seconds
781Related options:
783* ``shutdown_timeout``
784"""),
785 cfg.IntOpt('sharing_providers_max_uuids_per_request',
786 default=200,
787 min=1,
788 help="""
789Maximum number of aggregate UUIDs per API request. The default is 200.
791In deployments with a large number of aggregates, a 'Request-Too-Long'
792error may be raised by the web server or load balancer. This value
793allows setting the batch size to limit the query length.
795Possible values:
797* Any positive integer.
798"""),
799 cfg.IntOpt('resource_provider_association_refresh',
800 default=300,
801 min=0,
802 mutable=True,
803 # TODO(efried): Provide more/better explanation of what this option is
804 # all about. Reference bug(s). Unless we're just going to remove it.
805 help="""
806Interval for updating nova-compute-side cache of the compute node resource
807provider's inventories, aggregates, and traits.
809This option specifies the number of seconds between attempts to update a
810provider's inventories, aggregates and traits in the local cache of the compute
811node.
813A value of zero disables cache refresh completely.
815The cache can be cleared manually at any time by sending SIGHUP to the compute
816process, causing it to be repopulated the next time the data is accessed.
818Possible values:
820* Any positive integer in seconds, or zero to disable refresh.
821"""),
822 cfg.StrOpt('cpu_shared_set',
823 help="""
824Mask of host CPUs that can be used for ``VCPU`` resources and offloaded
825emulator threads.
827The behavior of this option depends on the definition of the deprecated
828``vcpu_pin_set`` option.
830* If ``vcpu_pin_set`` is not defined, ``[compute] cpu_shared_set`` will be be
831 used to provide ``VCPU`` inventory and to determine the host CPUs that
832 unpinned instances can be scheduled to. It will also be used to determine the
833 host CPUS that instance emulator threads should be offloaded to for instances
834 configured with the ``share`` emulator thread policy
835 (``hw:emulator_threads_policy=share``).
837* If ``vcpu_pin_set`` is defined, ``[compute] cpu_shared_set`` will only be
838 used to determine the host CPUs that instance emulator threads should be
839 offloaded to for instances configured with the ``share`` emulator thread
840 policy (``hw:emulator_threads_policy=share``). ``vcpu_pin_set`` will be used
841 to provide ``VCPU`` inventory and to determine the host CPUs that both pinned
842 and unpinned instances can be scheduled to.
844This behavior will be simplified in a future release when ``vcpu_pin_set`` is
845removed.
847Possible values:
849* A comma-separated list of physical CPU numbers that instance VCPUs can be
850 allocated from. Each element should be either a single CPU number, a range of
851 CPU numbers, or a caret followed by a CPU number to be excluded from a
852 previous range. For example::
854 cpu_shared_set = "4-12,^8,15"
856Related options:
858* ``[compute] cpu_dedicated_set``: This is the counterpart option for defining
859 where ``PCPU`` resources should be allocated from.
860* ``vcpu_pin_set``: A legacy option whose definition may change the behavior of
861 this option.
862"""),
863 cfg.StrOpt('cpu_dedicated_set',
864 help="""
865Mask of host CPUs that can be used for ``PCPU`` resources.
867The behavior of this option affects the behavior of the deprecated
868``vcpu_pin_set`` option.
870* If this option is defined, defining ``vcpu_pin_set`` will result in an error.
872* If this option is not defined, ``vcpu_pin_set`` will be used to determine
873 inventory for ``VCPU`` resources and to limit the host CPUs that both pinned
874 and unpinned instances can be scheduled to.
876This behavior will be simplified in a future release when ``vcpu_pin_set`` is
877removed.
879Possible values:
881* A comma-separated list of physical CPU numbers that instance VCPUs can be
882 allocated from. Each element should be either a single CPU number, a range of
883 CPU numbers, or a caret followed by a CPU number to be excluded from a
884 previous range. For example::
886 cpu_dedicated_set = "4-12,^8,15"
888Related options:
890* ``[compute] cpu_shared_set``: This is the counterpart option for defining
891 where ``VCPU`` resources should be allocated from.
892* ``vcpu_pin_set``: A legacy option that this option partially replaces.
893"""),
    # NOTE: despite being about the source host's behavior, this option is
    # read on the *destination* host of a live migration (per the help text
    # below), so it should be set consistently across all compute hosts.
    cfg.BoolOpt('live_migration_wait_for_vif_plug',
        default=True,
        help="""
Determine if the source compute host should wait for a ``network-vif-plugged``
event from the (neutron) networking service before starting the actual transfer
of the guest to the destination compute host.
Note that this option is read on the destination host of a live migration.
If you set this option the same on all of your compute hosts, which you should
do if you use the same networking backend universally, you do not have to
worry about this.
Before starting the transfer of the guest, some setup occurs on the destination
compute host, including plugging virtual interfaces. Depending on the
networking backend **on the destination host**, a ``network-vif-plugged``
event may be triggered and then received on the source compute host and the
source compute can wait for that event to ensure networking is set up on the
destination host before starting the guest transfer in the hypervisor.
.. note::
   The compute service cannot reliably determine which types of virtual
   interfaces (``port.binding:vif_type``) will send ``network-vif-plugged``
   events without an accompanying port ``binding:host_id`` change.
   Open vSwitch and linuxbridge should be OK, but OpenDaylight is at least
   one known backend that will not currently work in this case, see bug
   https://launchpad.net/bugs/1755890 for more details.
Possible values:
* True: wait for ``network-vif-plugged`` events before starting guest transfer
* False: do not wait for ``network-vif-plugged`` events before starting guest
  transfer (this is the legacy behavior)
Related options:
* [DEFAULT]/vif_plugging_is_fatal: if ``live_migration_wait_for_vif_plug`` is
  True and ``vif_plugging_timeout`` is greater than 0, and a timeout is
  reached, the live migration process will fail with an error but the guest
  transfer will not have started to the destination host
* [DEFAULT]/vif_plugging_timeout: if ``live_migration_wait_for_vif_plug`` is
  True, this controls the amount of time to wait before timing out and either
  failing if ``vif_plugging_is_fatal`` is True, or simply continuing with the
  live migration
"""),
939 cfg.IntOpt('max_concurrent_disk_ops',
940 default=0,
941 min=0,
942 help="""
943Number of concurrent disk-IO-intensive operations (glance image downloads,
944image format conversions, etc.) that we will do in parallel. If this is set
945too high then response time suffers.
946The default value of 0 means no limit.
947 """),
948 cfg.IntOpt('max_disk_devices_to_attach',
949 default=-1,
950 min=-1,
951 help="""
952Maximum number of disk devices allowed to attach to a single server. Note
953that the number of disks supported by an server depends on the bus used. For
954example, the ``ide`` disk bus is limited to 4 attached devices. The configured
955maximum is enforced during server create, rebuild, evacuate, unshelve, live
956migrate, and attach volume.
958Usually, disk bus is determined automatically from the device type or disk
959device, and the virtualization type. However, disk bus
960can also be specified via a block device mapping or an image property.
961See the ``disk_bus`` field in :doc:`/user/block-device-mapping` for more
962information about specifying disk bus in a block device mapping, and
963see https://docs.openstack.org/glance/latest/admin/useful-image-properties.html
964for more information about the ``hw_disk_bus`` image property.
966Operators changing the ``[compute]/max_disk_devices_to_attach`` on a compute
967service that is hosting servers should be aware that it could cause rebuilds to
968fail, if the maximum is decreased lower than the number of devices already
969attached to servers. For example, if server A has 26 devices attached and an
970operators changes ``[compute]/max_disk_devices_to_attach`` to 20, a request to
971rebuild server A will fail and go into ERROR state because 26 devices are
972already attached and exceed the new configured maximum of 20.
974Operators setting ``[compute]/max_disk_devices_to_attach`` should also be aware
975that during a cold migration, the configured maximum is only enforced in-place
976and the destination is not checked before the move. This means if an operator
977has set a maximum of 26 on compute host A and a maximum of 20 on compute host
978B, a cold migration of a server with 26 attached devices from compute host A to
979compute host B will succeed. Then, once the server is on compute host B, a
980subsequent request to rebuild the server will fail and go into ERROR state
981because 26 devices are already attached and exceed the configured maximum of 20
982on compute host B.
984The configured maximum is not enforced on shelved offloaded servers, as they
985have no compute host.
987.. warning:: If this option is set to 0, the ``nova-compute`` service will fail
988 to start, as 0 disk devices is an invalid configuration that would
989 prevent instances from being able to boot.
991Possible values:
993* -1 means unlimited
994* Any integer >= 1 represents the maximum allowed. A value of 0 will cause the
995 ``nova-compute`` service to fail to start, as 0 disk devices is an invalid
996 configuration that would prevent instances from being able to boot.
997"""),
    # Directory of YAML files that add custom inventory/traits to resource
    # providers; see the admin guide link in the help text.
    cfg.StrOpt('provider_config_location',
        default='/etc/nova/provider_config/',
        help="""
Location of YAML files containing resource provider configuration data.
These files allow the operator to specify additional custom inventory and
traits to assign to one or more resource providers.
Additional documentation is available here:
  https://docs.openstack.org/nova/latest/admin/managing-resource-providers.html
"""),
    # Image disk_format names this node should advertise as unsupported;
    # only effective together with the scheduler option named in the help.
    cfg.ListOpt('image_type_exclude_list',
        default=[],
        help="""
A list of image formats that should not be advertised as supported by this
compute node.
In some situations, it may be desirable to have a compute node
refuse to support an expensive or complex image format. This factors into
the decisions made by the scheduler about which compute node to select when
booted with a given image.
Possible values:
* Any glance image ``disk_format`` name (i.e. ``raw``, ``qcow2``, etc)
Related options:
* ``[scheduler]query_placement_for_image_type_support`` - enables
  filtering computes based on supported image types, which is required
  to be enabled for this to take effect.
"""),
    # NOTE: security-sensitive allow-list. Per the help text, multi-file VMDK
    # subformats with named extents can expose host files, so only
    # single-file sparse-header variants are allowed by default; an empty
    # list rejects all VMDK images.
    cfg.ListOpt('vmdk_allowed_types',
        default=['streamOptimized', 'monolithicSparse'],
        help="""
A list of strings describing allowed VMDK "create-type" subformats
that will be allowed. This is recommended to only include
single-file-with-sparse-header variants to avoid potential host file
exposure due to processing named extents. If this list is empty, then no
form of VMDK image will be allowed.
"""),
    # Host NUMA cell selection strategy for VMs with a NUMA topology:
    # False (default) spreads VM cells across host cells, True packs them
    # onto the most-used host cell first.
    cfg.BoolOpt('packing_host_numa_cells_allocation_strategy',
        default=False,
        help="""
This option controls allocation strategy used to choose NUMA cells on host for
placing VM's NUMA cells (for VMs with defined numa topology). By
default host's NUMA cell with more resources consumed will be chosen last for
placing attempt. When the packing_host_numa_cells_allocation_strategy variable
is set to ``False``, host's NUMA cell with more resources available will be
used. When set to ``True`` cells with some usage will be packed with VM's cell
until it will be completely exhausted, before a new free host's cell will be
used.
Possible values:
* ``True``: Packing VM's NUMA cell on most used host NUMA cell.
* ``False``: Spreading VM's NUMA cell on host's NUMA cells with more resources
  available.
"""),
]
# Intervals (seconds) for the compute service's periodic tasks. For most of
# these, 0 means "run at the default periodic interval" and a negative value
# disables the task; see each option's help for the exact semantics.
interval_opts = [
    cfg.IntOpt('sync_power_state_interval',
        default=600,
        help="""
Interval to sync power states between the database and the hypervisor.
The interval that Nova checks the actual virtual machine power state
and the power state that Nova has in its database. If a user powers
down their VM, Nova updates the API to report the VM has been
powered down. Should something turn on the VM unexpectedly,
Nova will turn the VM back off to keep the system in the expected
state.
Possible values:
* 0: Will run at the default periodic interval.
* Any value < 0: Disables the option.
* Any positive integer in seconds.
Related options:
* If ``handle_virt_lifecycle_events`` in the ``workarounds`` group is
  false and this option is negative, then instances that get out
  of sync between the hypervisor and the Nova database will have
  to be synchronized manually.
"""),
    cfg.IntOpt('heal_instance_info_cache_interval',
        default=-1,
        help="""
Interval between instance network information cache updates.
Number of seconds after which each compute node runs the task of
querying Neutron for all of its instances networking information,
then updates the Nova db with that information. Nova will not
update its cache periodically if this option is set to <= 0. Nova will still
react to network-changed external events to update its cache.
Each in tree neutron backend is sending network-changed external events to
update nova's view. So in these deployment scenarios this periodic is safe to
be turned off.
Out of tree neutron backends might not send this event and if the cache is not
up to date, then the metadata service and nova-api endpoints will be proxying
incorrect network data about the instance. So for these backends it is not
recommended to turn the periodic off.
Possible values:
* Any positive integer in seconds.
* Any value <=0 will disable the sync.
"""),
    cfg.IntOpt('reclaim_instance_interval',
        default=0,
        help="""
Interval for reclaiming deleted instances.
A value greater than 0 will enable SOFT_DELETE of instances.
This option decides whether the server to be deleted will be put into
the SOFT_DELETED state. If this value is greater than 0, the deleted
server will not be deleted immediately, instead it will be put into
a queue until it's too old (deleted time greater than the value of
reclaim_instance_interval). The server can be recovered from the
delete queue by using the restore action. If the deleted server remains
longer than the value of reclaim_instance_interval, it will be
deleted by a periodic task in the compute service automatically.
Note that this option is read from both the API and compute nodes, and
must be set globally otherwise servers could be put into a soft deleted
state in the API and never actually reclaimed (deleted) on the compute
node.
.. note:: When using this option, you should also configure the ``[cinder]``
   auth options, e.g. ``auth_type``, ``auth_url``, ``username``, etc.
   Since the reclaim happens in a periodic task, there is no user token
   to cleanup volumes attached to any SOFT_DELETED servers so nova must
   be configured with administrator role access to cleanup those
   resources in cinder.
Possible values:
* Any positive integer(in seconds) greater than 0 will enable
  this option.
* Any value <=0 will disable the option.
Related options:
* [cinder] auth options for cleaning up volumes attached to servers during
  the reclaim process
"""),
    cfg.IntOpt('volume_usage_poll_interval',
        default=0,
        help="""
Interval for gathering volume usages.
This option updates the volume usage cache for every
volume_usage_poll_interval number of seconds.
Possible values:
* Any positive integer(in seconds) greater than 0 will enable
  this option.
* Any value <=0 will disable the option.
"""),
    cfg.IntOpt('shelved_poll_interval',
        default=3600,
        help="""
Interval for polling shelved instances to offload.
The periodic task runs for every shelved_poll_interval number
of seconds and checks if there are any shelved instances. If it
finds a shelved instance, based on the 'shelved_offload_time' config
value it offloads the shelved instances. Check 'shelved_offload_time'
config option description for details.
Possible values:
* Any value <= 0: Disables the option.
* Any positive integer in seconds.
Related options:
* ``shelved_offload_time``
"""),
    cfg.IntOpt('shelved_offload_time',
        default=0,
        help="""
Time before a shelved instance is eligible for removal from a host.
By default this option is set to 0 and the shelved instance will be
removed from the hypervisor immediately after shelve operation.
Otherwise, the instance will be kept for the value of
shelved_offload_time(in seconds) so that during the time period the
unshelve action will be faster, then the periodic task will remove
the instance from hypervisor after shelved_offload_time passes.
Possible values:
* 0: Instance will be immediately offloaded after being
  shelved.
* Any value < 0: An instance will never offload.
* Any positive integer in seconds: The instance will exist for
  the specified number of seconds before being offloaded.
"""),
    # NOTE(melwitt): We're also using this option as the interval for cleaning
    # up expired console authorizations from the database. It's related to the
    # delete_instance_interval in that it's another task for cleaning up
    # resources related to an instance.
    cfg.IntOpt('instance_delete_interval',
        default=300,
        help="""
Interval for retrying failed instance file deletes.
This option depends on 'maximum_instance_delete_attempts'.
This option specifies how often to retry deletes whereas
'maximum_instance_delete_attempts' specifies the maximum number
of retry attempts that can be made.
Possible values:
* 0: Will run at the default periodic interval.
* Any value < 0: Disables the option.
* Any positive integer in seconds.
Related options:
* ``maximum_instance_delete_attempts`` from instance_cleaning_opts
  group.
"""),
    cfg.IntOpt('block_device_allocate_retries_interval',
        default=3,
        min=0,
        help="""
Interval (in seconds) between block device allocation retries on failures.
This option allows the user to specify the time interval between
consecutive retries. The ``block_device_allocate_retries`` option specifies
the maximum number of retries.
Possible values:
* 0: Disables the option.
* Any positive integer in seconds enables the option.
Related options:
* ``block_device_allocate_retries`` - controls the number of retries
"""),
    cfg.IntOpt('scheduler_instance_sync_interval',
        default=120,
        help="""
Interval between sending the scheduler a list of current instance UUIDs to
verify that its view of instances is in sync with nova.
If the CONF option 'scheduler_tracks_instance_changes' is
False, the sync calls will not be made. So, changing this option will
have no effect.
If the out of sync situations are not very common, this interval
can be increased to lower the number of RPC messages being sent.
Likewise, if sync issues turn out to be a problem, the interval
can be lowered to check more frequently.
Possible values:
* 0: Will run at the default periodic interval.
* Any value < 0: Disables the option.
* Any positive integer in seconds.
Related options:
* This option has no impact if ``scheduler_tracks_instance_changes``
  is set to False.
"""),
    cfg.IntOpt('update_resources_interval',
        default=0,
        help="""
Interval for updating compute resources.
This option specifies how often the update_available_resource
periodic task should run. A number less than 0 means to disable the
task completely. Leaving this at the default of 0 will cause this to
run at the default periodic interval. Setting it to any positive
value will cause it to run at approximately that number of seconds.
Possible values:
* 0: Will run at the default periodic interval.
* Any value < 0: Disables the option.
* Any positive integer in seconds.
""")
]
# Timeouts (seconds) after which the compute service automatically recovers
# instances stuck in transitional states (rebooting, building, rescued,
# resized) and bounds clean-shutdown time. All default to 0 (disabled)
# except shutdown_timeout.
timeout_opts = [
    cfg.IntOpt("reboot_timeout",
        default=0,
        min=0,
        help="""
Time interval after which an instance is hard rebooted automatically.
When doing a soft reboot, it is possible that a guest kernel is
completely hung in a way that causes the soft reboot task
to not ever finish. Setting this option to a time period in seconds
will automatically hard reboot an instance if it has been stuck
in a rebooting state longer than N seconds.
Possible values:
* 0: Disables the option (default).
* Any positive integer in seconds: Enables the option.
"""),
    cfg.IntOpt("instance_build_timeout",
        default=0,
        min=0,
        help="""
Maximum time in seconds that an instance can take to build.
If this timer expires, instance status will be changed to ERROR.
Enabling this option will make sure an instance will not be stuck
in BUILD state for a longer period.
Possible values:
* 0: Disables the option (default)
* Any positive integer in seconds: Enables the option.
"""),
    cfg.IntOpt("rescue_timeout",
        default=0,
        min=0,
        help="""
Interval to wait before un-rescuing an instance stuck in RESCUE.
Possible values:
* 0: Disables the option (default)
* Any positive integer in seconds: Enables the option.
"""),
    cfg.IntOpt("resize_confirm_window",
        default=0,
        min=0,
        help="""
Automatically confirm resizes after N seconds.
Resize functionality will save the existing server before resizing.
After the resize completes, user is requested to confirm the resize.
The user has the opportunity to either confirm or revert all
changes. Confirm resize removes the original server and changes
server status from resized to active. Setting this option to a time
period (in seconds) will automatically confirm the resize if the
server is in resized state longer than that time.
Possible values:
* 0: Disables the option (default)
* Any positive integer in seconds: Enables the option.
"""),
    cfg.IntOpt("shutdown_timeout",
        default=60,
        min=0,
        help="""
Total time to wait in seconds for an instance to perform a clean
shutdown.
It determines the overall period (in seconds) a VM is allowed to
perform a clean shutdown. While performing stop, rescue and shelve,
rebuild operations, configuring this option gives the VM a chance
to perform a controlled shutdown before the instance is powered off.
The default timeout is 60 seconds. A value of 0 (zero) means the guest
will be powered off immediately with no opportunity for guest OS clean-up.
The timeout value can be overridden on a per image basis by means
of os_shutdown_timeout that is an image metadata setting allowing
different types of operating systems to specify how much time they
need to shut down cleanly.
Possible values:
* A positive integer or 0 (default value is 60).
""")
]
# Handling of "running deleted" instances: guests still running on a compute
# node although their database records are deleted. Controls the action taken,
# how often the check runs, and how old a deleted record must be to qualify.
running_deleted_opts = [
    cfg.StrOpt("running_deleted_instance_action",
        default="reap",
        choices=[
            ('reap', 'Powers down the instances and deletes them'),
            ('log', 'Logs warning message about deletion of the resource'),
            ('shutdown', 'Powers down instances and marks them as '
             'non-bootable which can be later used for debugging/analysis'),
            ('noop', 'Takes no action'),
        ],
        help="""
The compute service periodically checks for instances that have been
deleted in the database but remain running on the compute node. The
above option enables action to be taken when such instances are
identified.
Related options:
* ``running_deleted_instance_poll_interval``
* ``running_deleted_instance_timeout``
"""),
    cfg.IntOpt("running_deleted_instance_poll_interval",
        default=1800,
        help="""
Time interval in seconds to wait between runs for the clean up action.
If set to 0, above check will be disabled. If
"running_deleted_instance_action" is set to "log" or "reap", a value greater
than 0 must be set.
Possible values:
* Any positive integer in seconds enables the option.
* 0: Disables the option.
* 1800: Default value.
Related options:
* running_deleted_instance_action
"""),
    cfg.IntOpt("running_deleted_instance_timeout",
        default=0,
        help="""
Time interval in seconds to wait for the instances that have
been marked as deleted in database to be eligible for cleanup.
Possible values:
* Any positive integer in seconds(default is 0).
Related options:
* "running_deleted_instance_action"
"""),
]
# Retry budget for reaping an instance's files; paired with
# instance_delete_interval in interval_opts, which sets the retry cadence.
instance_cleaning_opts = [
    cfg.IntOpt('maximum_instance_delete_attempts',
        default=5,
        min=1,
        help="""
The number of times to attempt to reap an instance's files.
This option specifies the maximum number of retry attempts
that can be made.
Possible values:
* Any positive integer defines how many attempts are made.
Related options:
* ``[DEFAULT] instance_delete_interval`` can be used to disable this option.
""")
]
# Options that control database-related behavior: instance-name uniqueness,
# auto-enabling of new compute services, and the instance name template.
db_opts = [
    cfg.StrOpt('osapi_compute_unique_server_name_scope',
        default='',
        choices=[
            ('', 'An empty value means that no uniqueness check is done and '
             'duplicate names are possible'),
            ('project', 'The instance name check is done only for instances '
             'within the same project'),
            ('global', 'The instance name check is done for all instances '
             'regardless of the project'),
        ],
        help="""
Sets the scope of the check for unique instance names.
The default doesn't check for unique names. If a scope for the name check is
set, a launch of a new instance or an update of an existing instance with a
duplicate name will result in an ``InstanceExists`` error. The uniqueness is
case-insensitive. Setting this option can increase the usability for end
users as they don't have to distinguish among instances with the same name
by their IDs.
"""),
    cfg.BoolOpt('enable_new_services',
        default=True,
        help="""
Enable new nova-compute services on this host automatically.
When a new nova-compute service starts up, it gets
registered in the database as an enabled service. Sometimes it can be useful
to register new compute services in disabled state and then enable them at a
later point in time. This option only sets this behavior for nova-compute
services, it does not auto-disable other services like nova-conductor,
nova-scheduler, or nova-osapi_compute.
Possible values:
* ``True``: Each new compute service is enabled as soon as it registers itself.
* ``False``: Compute services must be enabled via an os-services REST API call
  or with the CLI with ``nova service-enable <hostname> <binary>``, otherwise
  they are not ready to use.
"""),
    cfg.StrOpt('instance_name_template',
        default='instance-%08x',
        help="""
Template string to be used to generate instance names.
This template controls the creation of the database name of an instance. This
is *not* the display name you enter when creating an instance (via Horizon
or CLI). For a new deployment it is advisable to change the default value
(which uses the database autoincrement) to another value which makes use
of the attributes of an instance, like ``instance-%(uuid)s``. If you
already have instances in your deployment when you change this, your
deployment will break.
Possible values:
* A string which either uses the instance database ID (like the
  default)
* A string with a list of named database columns, for example ``%(id)d``
  or ``%(uuid)s`` or ``%(hostname)s``.
"""),
]
# All option lists registered under [DEFAULT]. The [compute] group's options
# (compute_group_opts) are intentionally not part of this tuple; they are
# registered and listed separately under the compute_group.
ALL_OPTS = (compute_opts +
            resource_tracker_opts +
            allocation_ratio_opts +
            compute_manager_opts +
            interval_opts +
            timeout_opts +
            running_deleted_opts +
            instance_cleaning_opts +
            db_opts)
def register_opts(conf):
    """Register this module's options on the given config object.

    Registers ALL_OPTS under [DEFAULT] and compute_group_opts under the
    [compute] group.

    :param conf: the oslo.config ConfigOpts object to register options on
    """
    conf.register_opts(ALL_OPTS)
    conf.register_group(compute_group)
    conf.register_opts(compute_group_opts, group=compute_group)
def list_opts():
    """Return this module's options keyed by group name.

    Used by oslo.config utilities (e.g. the sample-config generator) to
    discover the options this module provides.
    """
    return {'DEFAULT': ALL_OPTS,
            'compute': compute_group_opts}