Coverage for nova/virt/libvirt/volume/mount.py: 98%
137 statements
coverage.py v7.6.12, created at 2025-04-24 11:16 +0000
1# Copyright 2016,2017 Red Hat, Inc.
2#
3# Licensed under the Apache License, Version 2.0 (the "License"); you may
4# not use this file except in compliance with the License. You may obtain
5# a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12# License for the specific language governing permissions and limitations
13# under the License.
15import collections
16import contextlib
17import os.path
18import threading
20from oslo_concurrency import processutils
21from oslo_log import log
22from oslo_utils import fileutils
24import nova.conf
25from nova import exception
26import nova.privsep.fs
27import nova.privsep.path
29CONF = nova.conf.CONF
30LOG = log.getLogger(__name__)
33class _HostMountStateManager(object):
34 """A global manager of filesystem mounts.
36 _HostMountStateManager manages a _HostMountState object for the current
37 compute node. Primarily it creates one on host_up(), destroys it on
38 host_down(), and returns it via get_state().
40 _HostMountStateManager manages concurrency itself. Independent callers do
41 not need to consider interactions between multiple _HostMountStateManager
42 calls when designing their own locking.
44 _HostMountStateManager is a singleton, and must only be accessed via:
46 mount.get_manager()
47 """
49 def __init__(self):
50 self._reset_state()
52 def _reset_state(self):
53 """Reset state of global _HostMountStateManager.
55 Should only be called by __init__ and tests.
56 """
58 self.state = None
59 self.use_count = 0
61 # Guards both state and use_count
62 self.cond = threading.Condition()
64 # Incremented each time we initialise a new mount state. Aids
65 # debugging.
66 self.generation = 0
68 @contextlib.contextmanager
69 def get_state(self):
70 """Return the current mount state.
72 _HostMountStateManager will not permit a new state object to be
73 created while any previous state object is still in use.
75 get_state will raise HypervisorUnavailable if the libvirt connection is
76 currently down.
78 :rtype: _HostMountState
79 """
81 # We hold the instance lock here so that if a _HostMountState is
82 # currently initialising we'll wait for it to complete rather than
83 # fail.
84 with self.cond:
85 state = self.state
86 if state is None:
87 raise exception.HypervisorUnavailable()
88 self.use_count += 1
90 try:
91 LOG.debug('Got _HostMountState generation %(gen)i',
92 {'gen': state.generation})
94 yield state
95 finally:
96 with self.cond:
97 self.use_count -= 1
98 self.cond.notify_all()
100 def host_up(self, host):
101 """Initialise a new _HostMountState when the libvirt connection comes
102 up.
104 host_up will destroy and re-initialise the current state if one
105 already exists, but this is considered an error.
107 host_up will block before creating a new state until all operations
108 using a previous state have completed.
110 :param host: A connected libvirt Host object
111 """
112 with self.cond:
113 if self.state is not None:
114 LOG.warning("host_up called, but we think host is already up")
115 self._host_down()
117 # Wait until all operations using a previous state generation are
118 # complete before initialising a new one. Note that self.state is
119 # already None, set either by initialisation or by host_down. This
120 # means the current state will not be returned to any new callers,
121 # and use_count will eventually reach zero.
122 # We do this to avoid a race between _HostMountState initialisation
123 # and an on-going mount/unmount operation
124 while self.use_count != 0:
125 self.cond.wait()
127 # Another thread might have initialised state while we were
128 # wait()ing
129 if self.state is None:  # coverage: 129 ↛ exit (line 129 didn't jump to the function exit)
130 LOG.debug('Initialising _HostMountState generation %(gen)i',
131 {'gen': self.generation})
132 self.state = _HostMountState(host, self.generation)
133 self.generation += 1
135 def host_down(self):
136 """Destroy the current _HostMountState when the libvirt connection
137 goes down.
138 """
139 with self.cond:
140 if self.state is None:
141 LOG.warning("host_down called, but we don't think host is up")
142 return
144 self._host_down()
146 def _host_down(self):
147 LOG.debug('Destroying MountManager generation %(gen)i',
148 {'gen': self.state.generation})
149 self.state = None
152class _HostMountState(object):
153 """A data structure recording all managed mountpoints and the
154 attachments in use for each one. _HostMountState ensures that the compute
155 node mounts each mountpoint only once, even when it is in use by multiple
156 attachments, and that a mountpoint is not unmounted until it is no longer
157 in use by any attachment.
159 Callers should not create a _HostMountState directly, but should obtain
160 it via:
162 with mount.get_manager().get_state() as state:
163 state.mount(...)
165 On creation _HostMountState inspects the compute host directly to discover
166 all current mountpoints and the attachments on them. After creation it
167 expects to have exclusive control of these mountpoints until it is
168 destroyed.
170 _HostMountState manages concurrency itself. Independent callers do not need
171 to consider interactions between multiple _HostMountState calls when
172 designing their own locking.
173 """
175 class _MountPoint(object):
176 """A single mountpoint, and the set of attachments in use on it."""
178 def __init__(self):
179 # A guard for operations on this mountpoint
180 # N.B. Care is required using this lock, as it will be deleted
181 # if the containing _MountPoint is deleted.
182 self.lock = threading.Lock()
184 # The set of attachments on this mountpoint.
185 self.attachments = set()
187 def add_attachment(self, vol_name, instance_uuid):
188 self.attachments.add((vol_name, instance_uuid))
190 def remove_attachment(self, vol_name, instance_uuid):
191 self.attachments.remove((vol_name, instance_uuid))
193 def in_use(self):
194 return len(self.attachments) > 0
196 def __init__(self, host, generation):
197 """Initialise a _HostMountState by inspecting the current compute
198 host for mountpoints and the attachments in use on them.
200 :param host: A connected libvirt Host object
201 :param generation: An integer indicating the generation of this
202 _HostMountState object. This is 0 for the first
203 _HostMountState created, and incremented for each
204 created subsequently. It is used in log messages to
205 aid debugging.
206 """
207 self.generation = generation
208 self.mountpoints = collections.defaultdict(self._MountPoint)
210 # Iterate over all guests on the connected libvirt host
211 for guest in host.list_guests(only_running=False):
212 for disk in guest.get_all_disks():
214 # All remote filesystem volumes are files
215 if disk.source_type != 'file':
216 continue
218 # NOTE(mdbooth): We're assuming that the mountpoint is our
219 # immediate parent, which is currently true for all
220 # volume drivers. We deliberately don't do anything clever
221 # here, because we don't want to, e.g.:
222 # * Add mountpoints for non-volume disks
223 # * Get it wrong when a non-running domain references a
224 # volume which isn't mounted because the host just rebooted.
225 # and this is good enough. We could probably do better here
226 # with more thought.
228 mountpoint = os.path.dirname(disk.source_path)
229 if not os.path.ismount(mountpoint):
230 continue
232 name = os.path.basename(disk.source_path)
233 mount = self.mountpoints[mountpoint]
234 mount.add_attachment(name, guest.uuid)
236 LOG.debug('Discovered volume %(vol)s in use for existing '
237 'mountpoint %(mountpoint)s',
238 {'vol': name, 'mountpoint': mountpoint})
240 @contextlib.contextmanager
241 def _get_locked(self, mountpoint):
242 """Get a locked mountpoint object
244 :param mountpoint: The path of the mountpoint whose object we should
245 return.
246 :rtype: _HostMountState._MountPoint
247 """
248 # This dance is because we delete locks. We need to be sure that the
249 # lock we hold does not belong to an object which has been deleted.
250 # We do this by checking that mountpoint still refers to this object
251 # when we hold the lock. This is safe because:
252 # * we only delete an object from mountpoints whilst holding its lock
253 # * mountpoints is a defaultdict which will atomically create a new
254 # object on access
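# Illustrative interleaving this guards against: thread A looks up
# mountpoints[mountpoint] and blocks on its lock; thread B, which holds
# that lock, deletes the entry and releases the lock; when A acquires the
# now-orphaned lock, the defaultdict lookup below returns a fresh
# _MountPoint, the identity check fails, and A retries with the new object.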
255 while True:
256 mount = self.mountpoints[mountpoint]
257 with mount.lock:
258 if self.mountpoints[mountpoint] is mount:
259 yield mount
260 break
262 def mount(self, fstype, export, vol_name, mountpoint, instance, options):
263 """Ensure a mountpoint is available for an attachment, mounting it
264 if necessary.
266 If this is the first attachment on this mountpoint, we will mount it
267 with:
269 mount -t <fstype> <options> <export> <mountpoint>
271 :param fstype: The filesystem type to be passed to mount command.
272 :param export: The type-specific identifier of the filesystem to be
273 mounted, e.g. for nfs 'host.example.com:/mountpoint'.
274 :param vol_name: The name of the volume on the remote filesystem.
275 :param mountpoint: The directory where the filesystem will be
276 mounted on the local compute host.
277 :param instance: The instance the volume will be attached to.
278 :param options: An arbitrary list of additional arguments to be
279 passed to the mount command immediately before export
280 and mountpoint.
281 """
283 # NOTE(mdbooth): mount() may currently be called multiple times for a
284 # single attachment. Any operation which calls
285 # LibvirtDriver._hard_reboot will re-attach volumes which are probably
286 # already attached, resulting in multiple mount calls.
288 LOG.debug('_HostMountState.mount(fstype=%(fstype)s, '
289 'export=%(export)s, vol_name=%(vol_name)s, mountpoint=%(mountpoint)s, '
290 'options=%(options)s) generation %(gen)s',
291 {'fstype': fstype, 'export': export, 'vol_name': vol_name,
292 'mountpoint': mountpoint, 'options': options,
293 'gen': self.generation}, instance=instance)
294 with self._get_locked(mountpoint) as mount:
295 if os.path.ismount(mountpoint):
296 LOG.debug(('Mounting %(mountpoint)s generation %(gen)s, '
297 'mountpoint already mounted'),
298 {'mountpoint': mountpoint, 'gen': self.generation},
299 instance=instance)
300 else:
301 LOG.debug('Mounting %(mountpoint)s generation %(gen)s',
302 {'mountpoint': mountpoint, 'gen': self.generation},
303 instance=instance)
305 fileutils.ensure_tree(mountpoint)
307 try:
308 nova.privsep.fs.mount(fstype, export, mountpoint, options)
309 except processutils.ProcessExecutionError:
310 # Check to see if mountpoint is mounted despite the error
311 # e.g. it was already mounted
312 if os.path.ismount(mountpoint):
313 # We're not going to raise the exception because we're
314 # in the desired state anyway. However, this is still
315 # unusual so we'll log it.
316 LOG.exception(
317 'Error mounting %(fstype)s export %(export)s on '
318 '%(mountpoint)s. Continuing because mountpoint is '
319 'mounted despite this.',
320 {'fstype': fstype, 'export': export,
321 'mountpoint': mountpoint}, instance=instance)
322 else:
323 # If the mount failed there's no reason for us to keep
324 # a record of it. It will be created again if the
325 # caller retries.
327 # Delete while holding lock
328 del self.mountpoints[mountpoint]
330 raise
332 mount.add_attachment(vol_name, instance.uuid)
334 LOG.debug('_HostMountState.mount() for %(mountpoint)s '
335 'generation %(gen)s completed successfully',
336 {'mountpoint': mountpoint, 'gen': self.generation},
337 instance=instance)
339 def umount(self, vol_name, mountpoint, instance):
340 """Mark an attachment as no longer in use, and unmount its mountpoint
341 if necessary.
343 :param vol_name: The name of the volume on the remote filesystem.
344 :param mountpoint: The directory where the filesystem is
345 mounted on the local compute host.
346 :param instance: The instance the volume was attached to.
347 :returns: True if the mountpoint is still in use by other attachments
348 """
349 LOG.debug('_HostMountState.umount(vol_name=%(vol_name)s, '
350 'mountpoint=%(mountpoint)s) generation %(gen)s',
351 {'vol_name': vol_name, 'mountpoint': mountpoint,
352 'gen': self.generation}, instance=instance)
353 with self._get_locked(mountpoint) as mount:
354 try:
355 mount.remove_attachment(vol_name, instance.uuid)
356 except KeyError:
357 LOG.warning("Request to remove attachment (%(vol_name)s from "
358 "%(mountpoint)s, but we don't think it's in use.",
359 {'vol_name': vol_name, 'mountpoint': mountpoint},
360 instance=instance)
362 if not mount.in_use():
363 mounted = os.path.ismount(mountpoint)
365 if mounted:  # coverage: 365 ↛ 369 (line 365 didn't jump to line 369 because the condition on line 365 was always true)
366 mounted = self._real_umount(mountpoint)
368 # Delete our record entirely if it's unmounted
369 if not mounted:
370 del self.mountpoints[mountpoint]
372 LOG.debug('_HostMountState.umount() for %(mountpoint)s '
373 'generation %(gen)s completed successfully',
374 {'mountpoint': mountpoint, 'gen': self.generation},
375 instance=instance)
377 return mount.in_use()
379 def _real_umount(self, mountpoint):
380 # Unmount and delete a mountpoint.
381 # Return mount state after umount (i.e. True means still mounted)
382 LOG.debug('Unmounting %(mountpoint)s generation %(gen)s',
383 {'mountpoint': mountpoint, 'gen': self.generation})
385 try:
386 nova.privsep.fs.umount(mountpoint)
387 except processutils.ProcessExecutionError as ex:
388 LOG.error("Couldn't unmount %(mountpoint)s: %(reason)s",
389 {'mountpoint': mountpoint, 'reason': str(ex)})
391 if not os.path.ismount(mountpoint):
392 nova.privsep.path.rmdir(mountpoint)
393 return False
395 return True
398__manager__ = _HostMountStateManager()
401def get_manager():
402 """Return the _HostMountStateManager singleton.
404 :rtype: _HostMountStateManager
405 """
406 return __manager__
409def mount(fstype, export, vol_name, mountpoint, instance, options=None):
410 """A convenience wrapper around _HostMountState.mount(), called via the
411 _HostMountStateManager singleton.
412 """
413 with __manager__.get_state() as mount_state:
414 mount_state.mount(fstype, export, vol_name, mountpoint, instance,
415 options)
418def umount(vol_name, mountpoint, instance):
419 """A convenience wrapper around _HostMountState.umount(), called via the
420 _HostMountStateManager singleton.
421 """
422 with __manager__.get_state() as mount_state:
423 return mount_state.umount(vol_name, mountpoint, instance)
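# A minimal end-to-end sketch of the module-level helpers (names and values
# are illustrative; host_up() must have been called first, otherwise
# HypervisorUnavailable is raised):
#
#     from nova.virt.libvirt.volume import mount
#
#     mount.get_manager().host_up(host)
#     mount.mount('nfs', 'nfs.example.com:/export', 'volume-1234',
#                 '/var/lib/nova/mnt/abc', instance, options=['-o', 'ro'])
#     ...
#     still_in_use = mount.umount('volume-1234', '/var/lib/nova/mnt/abc',
#                                 instance)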