Coverage for nova/limit/local.py: 98% (85 statements)
coverage.py v7.6.12, created at 2025-04-17 15:08 +0000

# Copyright 2022 StackHPC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import typing as ty

from oslo_limit import exception as limit_exceptions
from oslo_limit import limit
from oslo_log import log as logging

import nova.conf
from nova import exception
from nova.limit import utils as nova_limit_utils
from nova import objects

LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF

# Entity types for API limits; the same as the config option names, prefixed
# with "server_" to disambiguate them in keystone.
SERVER_METADATA_ITEMS = "server_metadata_items"
INJECTED_FILES = "server_injected_files"
INJECTED_FILES_CONTENT = "server_injected_file_content_bytes"
INJECTED_FILES_PATH = "server_injected_file_path_bytes"
API_LIMITS = set([
    SERVER_METADATA_ITEMS,
    INJECTED_FILES,
    INJECTED_FILES_CONTENT,
    INJECTED_FILES_PATH,
])

# Entity types for all DB limits; the same as the config option names,
# prefixed with "server_" to disambiguate them in keystone.
KEY_PAIRS = "server_key_pairs"
SERVER_GROUPS = "server_groups"
SERVER_GROUP_MEMBERS = "server_group_members"
DB_LIMITS = set([
    KEY_PAIRS,
    SERVER_GROUPS,
    SERVER_GROUP_MEMBERS,
])

# Checks only happen when we are using the unified limits driver.
UNIFIED_LIMITS_DRIVER = "nova.quota.UnifiedLimitsDriver"
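
# Illustrative note (not part of the original module): the unified limits
# driver is selected in nova.conf, e.g.
#
#     [quota]
#     driver = nova.quota.UnifiedLimitsDriver
#
# A minimal sketch of the kind of check nova_limit_utils.use_unified_limits()
# performs against this constant (the real helper lives in nova/limit/utils.py
# and is the first thing every function below calls):
def _example_unified_limits_enabled() -> bool:
    # Hypothetical helper, shown only to illustrate how the driver setting
    # gates every check in this module.
    return CONF.quota.driver == UNIFIED_LIMITS_DRIVER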

# Map entity types to the exception we raise in the case that the resource is
# over the allowed limit. Each of these should be a subclass of
# exception.OverQuota.
EXCEPTIONS = {
    KEY_PAIRS: exception.KeypairLimitExceeded,
    INJECTED_FILES_CONTENT: exception.OnsetFileContentLimitExceeded,
    INJECTED_FILES_PATH: exception.OnsetFilePathLimitExceeded,
    INJECTED_FILES: exception.OnsetFileLimitExceeded,
    SERVER_METADATA_ITEMS: exception.MetadataLimitExceeded,
    SERVER_GROUPS: exception.ServerGroupLimitExceeded,
    SERVER_GROUP_MEMBERS: exception.GroupMemberLimitExceeded,
}

# Map the new limit-based quota names to the legacy ones.
LEGACY_LIMITS = {
    SERVER_METADATA_ITEMS: "metadata_items",
    INJECTED_FILES: "injected_files",
    INJECTED_FILES_CONTENT: "injected_file_content_bytes",
    INJECTED_FILES_PATH: "injected_file_path_bytes",
    KEY_PAIRS: "key_pairs",
    SERVER_GROUPS: SERVER_GROUPS,
    SERVER_GROUP_MEMBERS: SERVER_GROUP_MEMBERS,
}


def get_in_use(
    context: 'nova.context.RequestContext', project_id: str
) -> ty.Dict[str, int]:
    """Returns in-use counts for each resource for the given project.

    This sounds simple, but many resources can't be counted per project,
    so the only sensible value is 0. For example, key pairs are counted
    per user, server group members are counted per server group, and
    metadata items are counted per server.

    This behaviour is consistent with what is returned today by the
    DB-based quota driver.
    """
    count = _server_group_count(context, project_id)['server_groups']
    usages = {
        # DB limits
        SERVER_GROUPS: count,
        SERVER_GROUP_MEMBERS: 0,
        KEY_PAIRS: 0,
        # API limits
        SERVER_METADATA_ITEMS: 0,
        INJECTED_FILES: 0,
        INJECTED_FILES_CONTENT: 0,
        INJECTED_FILES_PATH: 0,
    }
    return _convert_keys_to_legacy_name(usages)
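
# Illustrative sketch (not part of the original module): the dict returned by
# get_in_use() uses the legacy key names, with every count fixed at 0 except
# the per-project server group count. `ctxt` is a hypothetical RequestContext.
def _example_log_in_use(ctxt: 'nova.context.RequestContext') -> None:
    usage = get_in_use(ctxt, ctxt.project_id)
    # e.g. {'metadata_items': 0, 'injected_files': 0,
    #       'injected_file_content_bytes': 0, 'injected_file_path_bytes': 0,
    #       'key_pairs': 0, 'server_groups': 2, 'server_group_members': 0}
    LOG.debug("Legacy-style usage for %s: %s", ctxt.project_id, usage)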

def always_zero_usage(
    project_id: str, resource_names: ty.List[str]
) -> ty.Dict[str, int]:
    """Called by oslo_limit's enforcer."""
    # Return usage of 0 for API limits. Values in API requests will be used as
    # the deltas.
    return {resource_name: 0 for resource_name in resource_names}
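
# Illustrative note (not part of the original module): oslo.limit calls the
# usage callback with a project id and the list of resource names being
# enforced, so returning zeros here means only the deltas passed to
# Enforcer.enforce() are compared against the registered limits. For example:
#
#     always_zero_usage("my-project", [SERVER_METADATA_ITEMS, INJECTED_FILES])
#     # -> {'server_metadata_items': 0, 'server_injected_files': 0}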

def enforce_api_limit(entity_type: str, count: int) -> None:
    """Check if the given count is over the limit for that entity type.

    This is generally used for limiting the size of certain API requests
    that eventually get stored in the database.
    """
    if not nova_limit_utils.use_unified_limits():
        return

    if entity_type not in API_LIMITS:
        fmt = "%s is not a valid API limit: %s"
        raise ValueError(fmt % (entity_type, API_LIMITS))

    try:
        enforcer = limit.Enforcer(always_zero_usage)
    except limit_exceptions.SessionInitError as e:
        msg = ("Failed to connect to keystone while enforcing %s quota limit."
               % entity_type)
        LOG.error(msg + " Error: " + str(e))
        raise exception.KeystoneConnectionFailed(msg)

    try:
        enforcer.enforce(None, {entity_type: count})
    except limit_exceptions.ProjectOverLimit as e:
        if nova_limit_utils.should_enforce(e):
            # Copy the exception message to an OverQuota to propagate to the
            # API layer.
            raise EXCEPTIONS.get(entity_type, exception.OverQuota)(str(e))
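
# Illustrative usage sketch (not part of the original module): how an API
# layer caller might guard a request. The `metadata` argument is hypothetical;
# the exception raised is the EXCEPTIONS entry for the entity type (here
# MetadataLimitExceeded), or exception.OverQuota as a fallback, so callers can
# also catch the base class.
def _example_check_metadata_quota(metadata: ty.Dict[str, str]) -> None:
    try:
        enforce_api_limit(SERVER_METADATA_ITEMS, len(metadata))
    except exception.MetadataLimitExceeded:
        # Translate to an API-level error response, re-raise, etc.
        raise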

def enforce_db_limit(
    context: 'nova.context.RequestContext',
    entity_type: str,
    entity_scope: ty.Any,
    delta: int
) -> None:
    """Check the provided delta does not put the resource over its limit.

    First we count the current usage for the specified scope, then add the
    provided delta to that count to see whether we are over the limit for
    that kind of entity.

    Note that we used to recheck these limits. However, these are really
    soft DDoS protections, not hard resource limits, so we don't do the
    recheck for these.

    The scope is specific to the limit type:

    * key_pairs scope is context.user_id
    * server_groups scope is context.project_id
    * server_group_members scope is server_group_uuid
    """
    if not nova_limit_utils.use_unified_limits():
        return

    if entity_type not in DB_COUNT_FUNCTION.keys():
        fmt = "%s does not have a DB count function defined: %s"
        raise ValueError(fmt % (entity_type, DB_COUNT_FUNCTION.keys()))
    if delta < 0:
        raise ValueError("delta must be a non-negative integer")

    count_function = DB_COUNT_FUNCTION[entity_type]

    try:
        enforcer = limit.Enforcer(
            functools.partial(count_function, context, entity_scope))
    except limit_exceptions.SessionInitError as e:
        msg = ("Failed to connect to keystone while enforcing %s quota limit."
               % entity_type)
        LOG.error(msg + " Error: " + str(e))
        raise exception.KeystoneConnectionFailed(msg)

    try:
        enforcer.enforce(None, {entity_type: delta})
    except limit_exceptions.ProjectOverLimit as e:
        if nova_limit_utils.should_enforce(e):
            # Copy the exception message to an OverQuota to propagate to the
            # API layer.
            raise EXCEPTIONS.get(entity_type, exception.OverQuota)(str(e))
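
# Illustrative usage sketch (not part of the original module): the scope
# argument differs per entity type, matching the docstring above. `ctxt` and
# `group_uuid` are hypothetical.
def _example_enforce_db_limits(ctxt: 'nova.context.RequestContext',
                               group_uuid: str) -> None:
    # Key pairs are counted per user.
    enforce_db_limit(ctxt, KEY_PAIRS, ctxt.user_id, delta=1)
    # Server groups are counted per project.
    enforce_db_limit(ctxt, SERVER_GROUPS, ctxt.project_id, delta=1)
    # Server group members are counted per server group.
    enforce_db_limit(ctxt, SERVER_GROUP_MEMBERS, group_uuid, delta=1)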

def _convert_keys_to_legacy_name(
    new_dict: ty.Dict[str, int]
) -> ty.Dict[str, int]:
    legacy = {}
    for new_name, old_name in LEGACY_LIMITS.items():
        # defensive in case oslo or keystone doesn't give us an answer
        legacy[old_name] = new_dict.get(new_name) or 0
    return legacy
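
# Illustrative example (not part of the original module): keys missing from
# the input, or mapped to None, fall back to 0.
#
#     _convert_keys_to_legacy_name({KEY_PAIRS: 100, SERVER_GROUPS: 10})
#     # -> {'metadata_items': 0, 'injected_files': 0,
#     #     'injected_file_content_bytes': 0, 'injected_file_path_bytes': 0,
#     #     'key_pairs': 100, 'server_groups': 10, 'server_group_members': 0}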

def get_legacy_default_limits() -> ty.Dict[str, int]:
    # TODO(johngarbutt): need oslo.limit API for this, it should do caching
    enforcer = limit.Enforcer(lambda: None)
    new_limits = enforcer.get_registered_limits(LEGACY_LIMITS.keys())
    return _convert_keys_to_legacy_name(dict(new_limits))
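
# Illustrative sketch (not part of the original module): callers get the
# operator-registered default limits back under the legacy names.
def _example_log_default_limits() -> None:
    defaults = get_legacy_default_limits()
    # e.g. {'metadata_items': 128, 'injected_files': 5, ...}, depending on
    # what has been registered in keystone.
    LOG.info("Unified limit defaults (legacy names): %s", defaults)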

def _keypair_count(context, user_id, *args):
    count = objects.KeyPairList.get_count_by_user(context, user_id)
    return {'server_key_pairs': count}


def _server_group_count(context, project_id, *args):
    raw_counts = objects.InstanceGroupList.get_counts(context, project_id)
    return {'server_groups': raw_counts['project']['server_groups']}


def _server_group_members_count(context, server_group_uuid, *args):
    # NOTE(johngarbutt) we used to count members added per user
    server_group = objects.InstanceGroup.get_by_uuid(context,
                                                     server_group_uuid)
    return {'server_group_members': len(server_group.members)}


DB_COUNT_FUNCTION = {
    KEY_PAIRS: _keypair_count,
    SERVER_GROUPS: _server_group_count,
    SERVER_GROUP_MEMBERS: _server_group_members_count,
}
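
# Illustrative note (not part of the original module): enforce_db_limit()
# binds (context, entity_scope) into the chosen count function with
# functools.partial, and oslo.limit then calls that partial with a project id
# and the list of resource names. The trailing *args on each count function
# above absorbs those extra arguments. A rough sketch of the call shape:
def _example_count_dispatch(ctxt: 'nova.context.RequestContext',
                            project_id: str) -> ty.Dict[str, int]:
    count_fn = functools.partial(DB_COUNT_FUNCTION[SERVER_GROUPS],
                                 ctxt, project_id)
    # What oslo.limit effectively invokes during enforcement:
    return count_fn(project_id, [SERVER_GROUPS])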