Coverage for nova/limit/placement.py: 92%
109 statements
coverage.py v7.6.12, created at 2025-04-17 15:08 +0000

# Copyright 2022 StackHPC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import typing as ty

import os_resource_classes as orc
from oslo_limit import exception as limit_exceptions
from oslo_limit import limit
from oslo_log import log as logging

import nova.conf
from nova import exception
from nova.limit import utils as limit_utils
from nova import objects
from nova import quota
from nova.scheduler.client import report
from nova.scheduler import utils

LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF

# Cache to avoid repopulating ksa state
PLACEMENT_CLIENT = None

LEGACY_LIMITS = {
    "servers": "instances",
    "class:VCPU": "cores",
    "class:MEMORY_MB": "ram",
}


def _get_placement_usages(
    context: 'nova.context.RequestContext', project_id: str
) -> ty.Dict[str, int]:
    return report.report_client_singleton().get_usages_counts_for_limits(
        context, project_id)
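
# Illustrative sketch only (not part of the original module): the report
# client call above returns usage keyed by bare resource class name, without
# the "class:" prefix. The project id and numbers below are invented.
#
#     _get_placement_usages(context, "my-project")
#     # -> {'VCPU': 8, 'MEMORY_MB': 16384, 'DISK_GB': 80}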


def _get_usage(
    context: 'nova.context.RequestContext',
    project_id: str,
    resource_names: ty.List[str],
) -> ty.Dict[str, int]:
    """Called by oslo_limit's enforcer"""
    if not limit_utils.use_unified_limits():
        raise NotImplementedError("Unified limits support is disabled")

    count_servers = False
    resource_classes = []

    for resource in resource_names:
        if resource == "servers":
            count_servers = True
            continue

        if not resource.startswith("class:"):
            raise ValueError("Unknown resource type: %s" % resource)

        # Temporarily strip resource class prefix as placement does not use it.
        # Example: limit resource 'class:VCPU' will be returned as 'VCPU' from
        # placement.
        r_class = resource.lstrip("class:")
        if r_class in orc.STANDARDS or orc.is_custom(r_class):
            resource_classes.append(r_class)
        else:
            raise ValueError("Unknown resource class: %s" % r_class)

    if not count_servers and len(resource_classes) == 0:
        raise ValueError("no resources to check")

    resource_counts = {}
    if count_servers:
        # TODO(melwitt): Change this to count servers from placement once nova
        # is using placement consumer types and is able to differentiate
        # between "instance" allocations vs "migration" allocations.
        if not quota.is_qfd_populated(context):
            LOG.error('Must migrate all instance mappings before using '
                      'unified limits')
            raise ValueError("must first migrate instance mappings")
        mappings = objects.InstanceMappingList.get_counts(context, project_id)
        resource_counts['servers'] = mappings['project']['instances']

    try:
        usages = _get_placement_usages(context, project_id)
    except exception.UsagesRetrievalFailed as e:
        msg = ("Failed to retrieve usages from placement while enforcing "
               "%s quota limits." % ", ".join(resource_names))
        LOG.error(msg + " Error: " + str(e))
        raise exception.UsagesRetrievalFailed(msg)

    # Use legacy behavior VCPU = VCPU + PCPU if configured.
    if CONF.workarounds.unified_limits_count_pcpu_as_vcpu:
        # If PCPU is in resource_classes, that means it was specified in the
        # flavor explicitly. In that case, we expect it to have its own limit
        # registered and we should not fold it into VCPU.
        if orc.PCPU in usages and orc.PCPU not in resource_classes:
            usages[orc.VCPU] = (usages.get(orc.VCPU, 0) +
                                usages.get(orc.PCPU, 0))

    for resource_class in resource_classes:
        # Need to add back resource class prefix that was stripped earlier
        resource_name = 'class:' + resource_class
        # Placement doesn't know about classes with zero usage
        # so default to zero to tell oslo.limit usage is zero
        resource_counts[resource_name] = usages.get(resource_class, 0)

    return resource_counts
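
# A hedged sketch of the callback contract fulfilled above for oslo.limit's
# enforcer: given limit resource names, return a usage count for each,
# defaulting to zero where placement reports nothing. Values are invented
# for illustration.
#
#     _get_usage(context, "my-project",
#                ["servers", "class:VCPU", "class:MEMORY_MB"])
#     # -> {'servers': 2, 'class:VCPU': 8, 'class:MEMORY_MB': 4096}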


def _get_deltas_by_flavor(
    flavor: 'objects.Flavor', is_bfv: bool, count: int
) -> ty.Dict[str, int]:
    if flavor is None:
        raise ValueError("flavor")
    if count < 0:
        raise ValueError("count")

    # NOTE(johngarbutt): this skips bfv, port, and cyborg resources
    # but it still gives us better checks than before unified limits
    # We need an instance in the DB to use the current is_bfv logic
    # which doesn't work well for instances that don't yet have a uuid
    deltas_from_flavor = utils.resources_for_limits(flavor, is_bfv)

    deltas = {"servers": count}
    for resource, amount in deltas_from_flavor.items():
        if amount != 0:
            deltas["class:%s" % resource] = amount * count
    return deltas
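
# For illustration only (flavor values assumed, not taken from this module):
# a flavor with 2 VCPUs, 1024 MB of RAM and a 10 GB root disk, requested
# three times with is_bfv=False, would produce deltas roughly like:
#
#     {'servers': 3, 'class:VCPU': 6, 'class:MEMORY_MB': 3072,
#      'class:DISK_GB': 30}
#
# With is_bfv=True the root disk is not counted towards DISK_GB.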


def _get_enforcer(
    context: 'nova.context.RequestContext', project_id: str
) -> limit.Enforcer:
    # NOTE(johngarbutt) should we move context arg into oslo.limit?
    def callback(project_id, resource_names):
        return _get_usage(context, project_id, resource_names)

    return limit.Enforcer(callback)


def enforce_num_instances_and_flavor(
    context: 'nova.context.RequestContext',
    project_id: str,
    flavor: 'objects.Flavor',
    is_bfvm: bool,
    min_count: int,
    max_count: int,
    enforcer: ty.Optional[limit.Enforcer] = None,
    delta_updates: ty.Optional[ty.Dict[str, int]] = None,
) -> int:
    """Return max instances possible, else raise TooManyInstances exception."""
    if not limit_utils.use_unified_limits():
        return max_count

    # Ensure the recursion will always complete
    if min_count < 0 or min_count > max_count:
        raise ValueError("invalid min_count")
    if max_count < 0:
        raise ValueError("invalid max_count")

    deltas = _get_deltas_by_flavor(flavor, is_bfvm, max_count)
    if delta_updates:
        deltas.update(delta_updates)

    enforcer = enforcer or _get_enforcer(context, project_id)
    try:
        enforcer.enforce(project_id, deltas)
    except limit_exceptions.ProjectOverLimit as e:
        if limit_utils.should_enforce(e):
            # NOTE(johngarbutt) we can do better, but this is very simple
            LOG.debug(
                "Limit check failed with count %s retrying with count %s",
                max_count, max_count - 1)
            try:
                return enforce_num_instances_and_flavor(
                    context, project_id, flavor, is_bfvm, min_count,
                    max_count - 1, enforcer=enforcer)
            except ValueError:
                # Copy the *original* exception message to an OverQuota to
                # propagate to the API layer
                raise exception.TooManyInstances(str(e))

    # no problems with max_count, so we return max count
    return max_count
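
# A hypothetical caller-side sketch (names and numbers are assumptions, not
# taken from this module): the API layer asks for up to max_count instances
# and gets back the largest count the project's unified limits allow, or
# TooManyInstances is raised once retrying would drop below min_count.
#
#     allowed = enforce_num_instances_and_flavor(
#         context, "my-project", flavor, is_bfvm=False,
#         min_count=1, max_count=5)
#     # allowed is somewhere between 1 and 5 when limits are enforced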


def _convert_keys_to_legacy_name(new_dict):
    legacy = {}
    for new_name, old_name in LEGACY_LIMITS.items():
        # defensive in case oslo or keystone doesn't give us an answer
        legacy[old_name] = new_dict.get(new_name) or 0
    return legacy
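
# For illustration only (input values invented): unified limit names are
# mapped back onto the legacy quota names via LEGACY_LIMITS, with missing
# entries defaulting to 0.
#
#     _convert_keys_to_legacy_name({'servers': 10, 'class:VCPU': 20})
#     # -> {'instances': 10, 'cores': 20, 'ram': 0}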


def get_legacy_default_limits():
    enforcer = limit.Enforcer(lambda: None)
    new_limits = enforcer.get_registered_limits(LEGACY_LIMITS.keys())
    return _convert_keys_to_legacy_name(dict(new_limits))


def get_legacy_project_limits(project_id):
    enforcer = limit.Enforcer(lambda: None)
    new_limits = enforcer.get_project_limits(project_id, LEGACY_LIMITS.keys())
    return _convert_keys_to_legacy_name(dict(new_limits))


def get_legacy_counts(context, project_id):
    resource_names = list(LEGACY_LIMITS.keys())
    resource_names.sort()
    new_usage = _get_usage(context, project_id, resource_names)
    return _convert_keys_to_legacy_name(new_usage)
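
# A hedged sketch of the legacy-shaped results these helpers return (numbers
# are invented): each produces a dict keyed by the old quota names.
#
#     get_legacy_project_limits("my-project")
#     # -> {'instances': 10, 'cores': 20, 'ram': 51200}
#
#     get_legacy_counts(context, "my-project")
#     # -> {'instances': 2, 'cores': 8, 'ram': 4096}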