// lock_order/lib.rs
1// Copyright 2023 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Tools for describing and enforcing lock acquisition order.
6//!
7//! Using code defines lock levels with types and then implements traits from
8//! this crate, like [`relation::LockAfter`] to describe how those locks can
9//! be acquired. A separate set of traits in [`lock`] implement locked access
10//! for your type. A complete example:
11//!
12//! ```
13//! use std::sync::Mutex;
14//! use lock_order::{impl_lock_after, lock::LockFor, relation::LockAfter, Locked, Unlocked};
15//!
16//! #[derive(Default)]
17//! struct HoldsLocks {
18//! a: Mutex<u8>,
19//! b: Mutex<u32>,
20//! }
21//!
22//! enum LockA {}
23//! enum LockB {}
24//!
25//! impl LockFor<LockA> for HoldsLocks {
26//! type Data = u8;
27//! type Guard<'l> = std::sync::MutexGuard<'l, u8>
28//! where Self: 'l;
29//! fn lock(&self) -> Self::Guard<'_> {
30//! self.a.lock().unwrap()
31//! }
32//! }
33//!
34//! impl LockFor<LockB> for HoldsLocks {
35//! type Data = u32;
36//! type Guard<'l> = std::sync::MutexGuard<'l, u32>
37//! where Self: 'l;
38//! fn lock(&self) -> Self::Guard<'_> {
39//! self.b.lock().unwrap()
40//! }
41//! }
42//!
43//! // LockA is the top of the lock hierarchy.
44//! impl LockAfter<Unlocked> for LockA {}
45//! // LockA can be acquired before LockB.
46//! impl_lock_after!(LockA => LockB);
47//!
48//! // Accessing locked state looks like this:
49//!
50//! let state = HoldsLocks::default();
51//! // Create a new lock session with the "root" lock level (empty tuple).
52//! let mut locked = Locked::new(&state);
53//! // Access locked state.
54//! let (a, mut locked_a) = locked.lock_and::<LockA>();
55//! let b = locked_a.lock::<LockB>();
56//! ```
57//!
58//! The methods on [`Locked`] prevent out-of-order locking according to the
59//! specified lock relationships.
60//!
61//! This won't compile because `LockB` does not implement `LockBefore<LockA>`:
62//! ```compile_fail
63//! # use std::sync::Mutex;
64//! # use lock_order::{impl_lock_after, lock::LockFor, relation::LockAfter, Locked, Unlocked};
65//! #
66//! # #[derive(Default)]
67//! # struct HoldsLocks {
68//! # a: Mutex<u8>,
69//! # b: Mutex<u32>,
70//! # }
71//! #
72//! # enum LockA {}
73//! # enum LockB {}
74//! #
75//! # impl LockFor<LockA> for HoldsLocks {
76//! # type Data = u8;
77//! # type Guard<'l> = std::sync::MutexGuard<'l, u8>
78//! # where Self: 'l;
79//! # fn lock(&self) -> Self::Guard<'_> {
80//! # self.a.lock().unwrap()
81//! # }
82//! # }
83//! #
84//! # impl LockFor<LockB> for HoldsLocks {
85//! # type Data = u32;
86//! # type Guard<'l> = std::sync::MutexGuard<'l, u32>
87//! # where Self: 'l;
88//! # fn lock(&self) -> Self::Guard<'_> {
89//! # self.b.lock().unwrap()
90//! # }
91//! # }
92//! #
93//! # // LockA is the top of the lock hierarchy.
94//! # impl LockAfter<Unlocked> for LockA {}
95//! # // LockA can be acquired before LockB.
96//! # impl_lock_after!(LockA => LockB);
97//! #
98//! #
99//! let state = HoldsLocks::default();
100//! let mut locked = Locked::new(&state);
101//!
102//! // Locking B without A is fine, but locking A after B is not.
103//! let (b, mut locked_b) = locked.lock_and::<LockB>();
104//! // compile error: LockB does not implement LockBefore<LockA>
105//! let a = locked_b.lock::<LockA>();
106//! ```
107//!
108//! Even if the lock guard goes out of scope, the new `Locked` instance returned
109//! by [Locked::lock_and] will prevent the original one from being used to
110//! access state. This doesn't work:
111//! ```compile_fail
112//! # use std::sync::Mutex;
113//! # use lock_order::{impl_lock_after, lock::LockFor, relation::LockAfter, Locked, Unlocked};
114//! #
115//! # #[derive(Default)]
116//! # struct HoldsLocks {
117//! # a: Mutex<u8>,
118//! # b: Mutex<u32>,
119//! # }
120//! #
121//! # enum LockA {}
122//! # enum LockB {}
123//! #
124//! # impl LockFor<LockA> for HoldsLocks {
125//! # type Data = u8;
126//! # type Guard<'l> = std::sync::MutexGuard<'l, u8>
127//! # where Self: 'l;
128//! # fn lock(&self) -> Self::Guard<'_> {
129//! # self.a.lock().unwrap()
130//! # }
131//! # }
132//! #
133//! # impl LockFor<LockB> for HoldsLocks {
134//! # type Data = u32;
135//! # type Guard<'l> = std::sync::MutexGuard<'l, u32>
136//! # where Self: 'l;
137//! # fn lock(&self) -> Self::Guard<'_> {
138//! # self.b.lock().unwrap()
139//! # }
140//! # }
141//! #
142//! # // LockA is the top of the lock hierarchy.
143//! # impl LockAfter<Unlocked> for LockA {}
144//! # // LockA can be acquired before LockB.
145//! # impl_lock_after!(LockA => LockB);
146//! #
147//! #
148//! let state = HoldsLocks::default();
149//! let mut locked = Locked::new(&state);
150//!
151//! let (b, mut locked_b) = locked.lock_and::<LockB>();
152//! drop(b);
153//! let b = locked_b.lock::<LockB>();
154//! // Won't work; `locked` is mutably borrowed by `locked_b`.
155//! let a = locked.lock::<LockA>();
156//! ```
157//!
158//! The [`impl_lock_after`] macro provides implementations of `LockAfter` for
159//! a pair of locks. The complete lock ordering tree can be spelled out by
160//! calling `impl_lock_after` for each parent and child in the hierarchy. One
161//! of the upsides to using `impl_lock_after` is that it also prevents
162//! accidental lock ordering inversion. This won't compile:
163//! ```compile_fail
164//! enum LockA {}
165//! enum LockB {}
166//!
//! impl_lock_after!(LockA => LockB);
//! impl_lock_after!(LockB => LockA);
169//! ```
170
171#![cfg_attr(not(test), no_std)]
172
173pub mod lock;
174pub mod relation;
175pub mod wrap;
176
177use core::marker::PhantomData;
178use core::ops::Deref;
179
180use crate::lock::{LockFor, RwLockFor, UnlockedAccess};
181use crate::relation::LockBefore;
182
/// Enforcement mechanism for lock ordering.
///
/// `Locked` won't allow locking that violates the described lock order. It
/// enforces this by allowing access to state so long as either
/// 1. the state does not require a lock to access, or
/// 2. the state does require a lock and that lock comes after the current
///    lock level in the global lock order.
///
/// In the locking case, acquiring a lock produces the new state and a new
/// `Locked` instance that mutably borrows from the original instance. This
/// means the original instance can't be used to acquire new locks until the
/// new instance leaves scope.
///
/// `T` is the (reference-like) state being guarded and `L` is a zero-sized
/// marker type naming the current lock level; `PhantomData<L>` carries `L`
/// in the type without storing any runtime data for it.
pub struct Locked<T, L>(T, PhantomData<L>);
196
/// "Highest" lock level
///
/// The lock level for the thing returned by `Locked::new`. Users of this crate
/// should implement `LockAfter<Unlocked>` for the root of any lock ordering
/// trees.
///
/// `Unlocked` itself guards nothing; it only marks the state in which no lock
/// has been acquired yet.
pub struct Unlocked;
203
204impl<'a, T> Locked<&'a T, Unlocked> {
205 /// Entry point for locked access.
206 ///
207 /// `Unlocked` is the "root" lock level and can be acquired before any lock.
208 ///
209 /// This function is equivalent to [`Locked::new_with_deref`] but coerces
210 /// the argument to a simple borrow, which is the expected common use case.
211 pub fn new(t: &'a T) -> Self {
212 Self::new_with_deref(t)
213 }
214}
215
216impl<T> Locked<T, Unlocked>
217where
218 T: Deref,
219 T::Target: Sized,
220{
221 /// Entry point for locked access.
222 ///
223 /// `Unlocked` is the "root" lock level and can be acquired before any lock.
224 ///
225 /// Unlike [`Locked::new`], this function just requires that `T` be
226 /// [`Deref`] and doesn't coerce the type. Use this function when creating a
227 /// new `Locked` from cell-like types.
228 ///
229 /// Prefer [`Locked::new`] in most situations given the coercion to a simple
230 /// borrow is generally less surprising. For example, `&mut T` also `Deref`s
231 /// to `T` and makes for sometimes hard to pin down compilation errors when
232 /// implementing traits for `Locked<&State, L>` as opposed to `&mut State`.
233 pub fn new_with_deref(t: T) -> Self {
234 Self::new_locked_with_deref(t)
235 }
236}
237
238impl<'a, T, L> Locked<&'a T, L> {
239 /// Entry point for locked access.
240 ///
241 /// Creates a new `Locked` that restricts locking to levels after `L`. This
242 /// is safe because any acquirable locks must have a total ordering, and
243 /// restricting the set of locks doesn't violate that ordering.
244 ///
245 /// See discussion on [`Locked::new_with_deref`] for when to use this
246 /// function versus [`Locked::new_locked_with_deref`].
247 pub fn new_locked(t: &'a T) -> Locked<&'a T, L> {
248 Self::new_locked_with_deref(t)
249 }
250
251 /// Access some state that doesn't require locking.
252 ///
253 /// This allows access to state that doesn't require locking (and depends on
254 /// [`UnlockedAccess`] to be implemented only in cases where that is true).
255 pub fn unlocked_access<M>(&self) -> T::Guard<'a>
256 where
257 T: UnlockedAccess<M>,
258 {
259 let Self(t, PhantomData) = self;
260 T::access(t)
261 }
262
263 /// Access some state that doesn't require locking from an internal impl of
264 /// [`UnlockedAccess`].
265 ///
266 /// This allows access to state that doesn't require locking (and depends on
267 /// [`UnlockedAccess`] to be implemented only in cases where that is true).
268 pub fn unlocked_access_with<M, X>(&self, f: impl FnOnce(&'a T) -> &'a X) -> X::Guard<'a>
269 where
270 X: UnlockedAccess<M>,
271 {
272 let Self(t, PhantomData) = self;
273 X::access(f(t))
274 }
275}
276
// It's important that the lifetime on `Locked` here be anonymous. That means
// that the lifetimes in the returned `Locked` objects below are inferred to
// be the lifetimes of the references to self (mutable or immutable).
impl<T, L> Locked<T, L>
where
    T: Deref,
    T::Target: Sized,
{
    /// Entry point for locked access.
    ///
    /// Creates a new `Locked` that restricts locking to levels after `L`. This
    /// is safe because any acquirable locks must have a total ordering, and
    /// restricting the set of locks doesn't violate that ordering.
    ///
    /// See discussion on [`Locked::new_with_deref`] for when to use this
    /// function versus [`Locked::new_locked`].
    pub fn new_locked_with_deref(t: T) -> Locked<T, L> {
        Self(t, PhantomData)
    }

    /// Acquire the given lock.
    ///
    /// This requires that `M` can be locked after `L`.
    pub fn lock<M>(&mut self) -> <T::Target as LockFor<M>>::Guard<'_>
    where
        T::Target: LockFor<M>,
        L: LockBefore<M>,
    {
        self.lock_with::<M, _>(|t| t)
    }

    /// Acquire the given lock and a new locked context.
    ///
    /// This requires that `M` can be locked after `L`.
    pub fn lock_and<M>(&mut self) -> (<T::Target as LockFor<M>>::Guard<'_>, Locked<&T::Target, M>)
    where
        T::Target: LockFor<M>,
        L: LockBefore<M>,
    {
        self.lock_with_and::<M, _>(|t| t)
    }

    /// Acquire the given lock from an internal impl of [`LockFor`].
    ///
    /// This requires that `M` can be locked after `L`.
    pub fn lock_with<M, X>(&mut self, f: impl FnOnce(&T::Target) -> &X) -> X::Guard<'_>
    where
        X: LockFor<M>,
        L: LockBefore<M>,
    {
        // The new locked context is discarded; only the guard is returned.
        let (data, _): (_, Locked<&T::Target, M>) = self.lock_with_and::<M, _>(f);
        data
    }

    /// Acquire the given lock and a new locked context from an internal impl of
    /// [`LockFor`].
    ///
    /// This requires that `M` can be locked after `L`.
    pub fn lock_with_and<M, X>(
        &mut self,
        f: impl FnOnce(&T::Target) -> &X,
    ) -> (X::Guard<'_>, Locked<&T::Target, M>)
    where
        X: LockFor<M>,
        L: LockBefore<M>,
    {
        let Self(t, PhantomData) = self;
        let t = Deref::deref(t);
        let data = X::lock(f(t));
        // The returned `Locked` is at level `M` and mutably borrows `self`,
        // preventing `self` from acquiring further locks until it's dropped.
        (data, Locked(t, PhantomData))
    }

    /// Attempt to acquire the given read lock.
    ///
    /// For accessing state via reader/writer locks. This requires that `M` can
    /// be locked after `L`.
    pub fn read_lock<M>(&mut self) -> <T::Target as RwLockFor<M>>::ReadGuard<'_>
    where
        T::Target: RwLockFor<M>,
        L: LockBefore<M>,
    {
        self.read_lock_with::<M, _>(|t| t)
    }

    /// Attempt to acquire the given read lock and a new locked context.
    ///
    /// For accessing state via reader/writer locks. This requires that `M` can
    /// be locked after `L`.
    pub fn read_lock_and<M>(
        &mut self,
    ) -> (<T::Target as RwLockFor<M>>::ReadGuard<'_>, Locked<&T::Target, M>)
    where
        T::Target: RwLockFor<M>,
        L: LockBefore<M>,
    {
        self.read_lock_with_and::<M, _>(|t| t)
    }

    /// Attempt to acquire the given read lock from an internal impl of
    /// [`RwLockFor`].
    ///
    /// For accessing state via reader/writer locks. This requires that `M` can
    /// be locked after `L`.
    pub fn read_lock_with<M, X>(&mut self, f: impl FnOnce(&T::Target) -> &X) -> X::ReadGuard<'_>
    where
        X: RwLockFor<M>,
        L: LockBefore<M>,
    {
        let (data, _): (_, Locked<&T::Target, M>) = self.read_lock_with_and::<M, _>(f);
        data
    }

    /// Attempt to acquire the given read lock and a new locked context from an
    /// internal impl of [`RwLockFor`].
    ///
    /// For accessing state via reader/writer locks. This requires that `M` can
    /// be locked after `L`.
    pub fn read_lock_with_and<M, X>(
        &mut self,
        f: impl FnOnce(&T::Target) -> &X,
    ) -> (X::ReadGuard<'_>, Locked<&T::Target, M>)
    where
        X: RwLockFor<M>,
        L: LockBefore<M>,
    {
        let Self(t, PhantomData) = self;
        let t = Deref::deref(t);
        let data = X::read_lock(f(t));
        (data, Locked(t, PhantomData))
    }

    /// Attempt to acquire the given write lock.
    ///
    /// For accessing state via reader/writer locks. This requires that `M` can
    /// be locked after `L`.
    pub fn write_lock<M>(&mut self) -> <T::Target as RwLockFor<M>>::WriteGuard<'_>
    where
        T::Target: RwLockFor<M>,
        L: LockBefore<M>,
    {
        self.write_lock_with::<M, _>(|t| t)
    }

    /// Attempt to acquire the given write lock and a new locked context.
    ///
    /// For accessing state via reader/writer locks. This requires that `M` can
    /// be locked after `L`.
    pub fn write_lock_and<M>(
        &mut self,
    ) -> (<T::Target as RwLockFor<M>>::WriteGuard<'_>, Locked<&T::Target, M>)
    where
        T::Target: RwLockFor<M>,
        L: LockBefore<M>,
    {
        self.write_lock_with_and::<M, _>(|t| t)
    }

    /// Attempt to acquire the given write lock from an internal impl of
    /// [`RwLockFor`].
    ///
    /// For accessing state via reader/writer locks. This requires that `M` can
    /// be locked after `L`.
    pub fn write_lock_with<M, X>(&mut self, f: impl FnOnce(&T::Target) -> &X) -> X::WriteGuard<'_>
    where
        X: RwLockFor<M>,
        L: LockBefore<M>,
    {
        let (data, _): (_, Locked<&T::Target, M>) = self.write_lock_with_and::<M, _>(f);
        data
    }

    /// Attempt to acquire the given write lock and a new locked context from
    /// an internal impl of [`RwLockFor`].
    ///
    /// For accessing state via reader/writer locks. This requires that `M` can
    /// be locked after `L`.
    pub fn write_lock_with_and<M, X>(
        &mut self,
        f: impl FnOnce(&T::Target) -> &X,
    ) -> (X::WriteGuard<'_>, Locked<&T::Target, M>)
    where
        X: RwLockFor<M>,
        L: LockBefore<M>,
    {
        let Self(t, PhantomData) = self;
        let t = Deref::deref(t);
        let data = X::write_lock(f(t));
        (data, Locked(t, PhantomData))
    }

    /// Returns an owned `Locked` from a current `Locked`.
    ///
    /// Useful when callers need to have access to an owned `Locked` but only
    /// have access to a reference.
    ///
    /// This method is a shorthand for `self.cast_with(|s| s)`. This is safe
    /// because the returned `Locked` instance borrows `self` mutably so it
    /// can't be used until the new instance is dropped.
    pub fn as_owned(&mut self) -> Locked<&T::Target, L> {
        self.cast_with(|s| s)
    }

    /// Narrow the type on which locks can be acquired.
    ///
    /// Like `cast_with`, but with `AsRef` instead of using a callable function.
    /// The same safety arguments apply.
    pub fn cast<R>(&mut self) -> Locked<&R, L>
    where
        T::Target: AsRef<R>,
    {
        self.cast_with(AsRef::as_ref)
    }

    /// Narrow the type on which locks can be acquired.
    ///
    /// This allows scoping down the state on which locks are acquired. This is
    /// safe because
    /// 1. the locked wrapper does not take the type `T` being locked into
    ///    account, so there's no danger of lock ordering being different for
    ///    `T` and some other type `R`,
    /// 2. because the new `&R` references a part of the original `&T`, any
    ///    state that was lockable from `&T` was lockable from `&R`, and
    /// 3. the returned `Locked` instance borrows `self` mutably so it can't
    ///    be used until the new instance is dropped.
    ///
    /// This method provides a flexible way to access some state held within the
    /// protected instance of `T` by scoping down to an individual field, or
    /// infallibly indexing into a `Vec`, slice, or map.
    pub fn cast_with<R>(&mut self, f: impl FnOnce(&T::Target) -> &R) -> Locked<&R, L> {
        let Self(t, PhantomData) = self;
        Locked(f(Deref::deref(t)), PhantomData)
    }

    /// Restrict locking as if a lock was acquired.
    ///
    /// Like `lock_and` but doesn't actually acquire the lock `M`. This is
    /// safe because any locks that could be acquired with the lock `M` held can
    /// also be acquired without `M` being held.
    pub fn cast_locked<M>(&mut self) -> Locked<&T::Target, M>
    where
        L: LockBefore<M>,
    {
        let Self(t, _marker) = self;
        Locked(Deref::deref(t), PhantomData)
    }

    /// Convenience function for accessing copyable state.
    ///
    /// This, combined with `cast` or `cast_with`, makes it easy to access
    /// non-locked state.
    pub fn copied(&self) -> T::Target
    where
        T::Target: Copy,
    {
        let Self(t, PhantomData) = self;
        *t.deref()
    }

    /// Adopts reference `n` to the locked context.
    ///
    /// This allows access on disjoint structures to adopt the same lock level.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use lock_order::{Locked, relation::LockBefore};
    /// struct StateA;
    /// struct StateB;
    /// # impl lock_order::lock::LockFor<LockX> for StateB {
    /// #   type Data = u8;
    /// #   type Guard<'l> = std::sync::MutexGuard<'l, u8>
    /// #       where Self: 'l;
    /// #   fn lock(&self) -> Self::Guard<'_> {
    /// #       unimplemented!()
    /// #   }
    /// # }
    /// enum LockX {}
    ///
    /// fn adopt_example<L: LockBefore<LockX>>(mut locked: Locked<&StateA, L>, state_b: &StateB) {
    ///     let mut locked = locked.adopt(state_b);
    ///     // Lock something from `StateB` advancing the lock level to `LockX`.
    ///     let (guard, mut locked) = locked.lock_with_and::<LockX, _>(|c| c.right());
    ///     // We can get back a `Locked` for `StateA` at the new lock level.
    ///     let locked: Locked<&StateA, LockX> = locked.cast_with(|c| c.left());
    /// }
    /// ```
    pub fn adopt<'a, N>(
        &'a mut self,
        n: &'a N,
    ) -> Locked<OwnedTupleWrapper<&'a T::Target, &'a N>, L> {
        let Self(t, PhantomData) = self;
        Locked(OwnedWrapper(TupleWrapper(Deref::deref(t), n)), PhantomData)
    }

    /// Casts the left reference of the [`TupleWrapper`] deref'ed by `T`.
    pub fn cast_left<'a, X, A: Deref + 'a, B: Deref + 'a, F: FnOnce(&A::Target) -> &X>(
        &'a mut self,
        f: F,
    ) -> Locked<OwnedTupleWrapper<&'a X, &'a B::Target>, L>
    where
        T: Deref<Target = TupleWrapper<A, B>>,
    {
        let Self(t, PhantomData) = self;
        Locked(Deref::deref(t).cast_left(f), PhantomData)
    }

    /// Casts the right reference of the [`TupleWrapper`] deref'ed by `T`.
    pub fn cast_right<'a, X, A: Deref + 'a, B: Deref + 'a, F: FnOnce(&B::Target) -> &X>(
        &'a mut self,
        f: F,
    ) -> Locked<OwnedTupleWrapper<&'a A::Target, &'a X>, L>
    where
        T: Deref<Target = TupleWrapper<A, B>>,
    {
        let Self(t, PhantomData) = self;
        Locked(Deref::deref(t).cast_right(f), PhantomData)
    }

    /// Replaces the internal type entirely but keeps the lock level.
    ///
    /// This does not break ordering because the new `Locked` takes a
    /// mutable borrow on the current one.
    pub fn replace<'a, N>(&'a mut self, n: &'a N) -> Locked<&'a N, L> {
        Locked::new_locked(n)
    }
}
603
/// An owned wrapper for `T` that implements [`Deref`].
pub struct OwnedWrapper<T>(T);

impl<T> Deref for OwnedWrapper<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

/// A convenient alias for a [`TupleWrapper`] inside an [`OwnedWrapper`].
pub type OwnedTupleWrapper<A, B> = OwnedWrapper<TupleWrapper<A, B>>;

/// A wrapper pairing two reference-like values so both can live behind a
/// single `Locked` context (see [`Locked::adopt`]).
pub struct TupleWrapper<A, B>(A, B);

impl<A, B> TupleWrapper<A, B>
where
    A: Deref,
    B: Deref,
{
    /// Returns the deref target of the first (left) element.
    pub fn left(&self) -> &A::Target {
        self.0.deref()
    }

    /// Returns the deref target of the second (right) element.
    pub fn right(&self) -> &B::Target {
        self.1.deref()
    }

    /// Returns the deref targets of both elements at once.
    pub fn both(&self) -> (&A::Target, &B::Target) {
        (self.0.deref(), self.1.deref())
    }

    /// Maps the left element through `f`, leaving the right untouched.
    pub fn cast_left<X, F: FnOnce(&A::Target) -> &X>(
        &self,
        f: F,
    ) -> OwnedTupleWrapper<&X, &B::Target> {
        OwnedWrapper(TupleWrapper(f(self.0.deref()), self.1.deref()))
    }

    /// Maps the right element through `f`, leaving the left untouched.
    pub fn cast_right<X, F: FnOnce(&B::Target) -> &X>(
        &self,
        f: F,
    ) -> OwnedTupleWrapper<&A::Target, &X> {
        OwnedWrapper(TupleWrapper(self.0.deref(), f(self.1.deref())))
    }
}
658
#[cfg(test)]
mod test {
    use std::ops::Deref;
    use std::sync::{Mutex, MutexGuard};

    mod lock_levels {
        //! Lock ordering tree:
        //! A -> B -> {C, D}
        //! D -> E

        // NOTE(review): aliasing the crate as `lock_order` here presumably
        // lets macro-expanded paths from `impl_lock_after!` that name the
        // crate absolutely resolve within the crate's own tests — confirm
        // against the macro definition in `relation`.
        extern crate self as lock_order;

        use crate::relation::LockAfter;
        use crate::{impl_lock_after, Unlocked};

        pub enum A {}
        pub enum B {}
        pub enum C {}
        pub enum D {}
        pub enum E {}

        // `A` is the root of the ordering tree; each `impl_lock_after!` adds
        // one parent -> child edge.
        impl LockAfter<Unlocked> for A {}
        impl_lock_after!(A => B);
        impl_lock_after!(B => C);
        impl_lock_after!(B => D);
        impl_lock_after!(D => E);
    }

    use crate::lock::{LockFor, UnlockedAccess};
    use crate::Locked;
    use lock_levels::{A, B, C, D, E};

    /// Data type with multiple locked fields.
    #[derive(Default)]
    struct Data {
        a: Mutex<u8>,         // guarded at lock level `A`
        b: Mutex<u16>,        // guarded at lock level `B`
        c: Mutex<u64>,        // guarded at lock level `C`
        d: Mutex<u128>,       // guarded at lock level `D`
        e: Vec<Mutex<usize>>, // elements individually guarded at level `E`
        u: usize,             // lock-free state, exposed via `UnlockedUsize`
    }

    impl LockFor<A> for Data {
        type Data = u8;
        type Guard<'l> = MutexGuard<'l, u8>;
        fn lock(&self) -> Self::Guard<'_> {
            self.a.lock().unwrap()
        }
    }

    impl LockFor<B> for Data {
        type Data = u16;
        type Guard<'l> = MutexGuard<'l, u16>;
        fn lock(&self) -> Self::Guard<'_> {
            self.b.lock().unwrap()
        }
    }

    impl LockFor<C> for Data {
        type Data = u64;
        type Guard<'l> = MutexGuard<'l, u64>;
        fn lock(&self) -> Self::Guard<'_> {
            self.c.lock().unwrap()
        }
    }

    impl LockFor<D> for Data {
        type Data = u128;
        type Guard<'l> = MutexGuard<'l, u128>;
        fn lock(&self) -> Self::Guard<'_> {
            self.d.lock().unwrap()
        }
    }

    // Implemented on `Mutex<usize>` directly (not `Data`) so that individual
    // elements of `Data::e` can be locked via `cast_with`.
    impl LockFor<E> for Mutex<usize> {
        type Data = usize;
        type Guard<'l> = MutexGuard<'l, usize>;
        fn lock(&self) -> Self::Guard<'_> {
            self.lock().unwrap()
        }
    }

    // Marker types for lock-free access to `Data` fields.
    enum UnlockedUsize {}
    enum UnlockedELen {}

    impl UnlockedAccess<UnlockedUsize> for Data {
        type Data = usize;
        type Guard<'l>
            = &'l usize
        where
            Self: 'l;

        fn access(&self) -> Self::Guard<'_> {
            &self.u
        }
    }

    // Minimal `Deref` wrapper so a computed (owned) value can serve as an
    // `UnlockedAccess` guard.
    struct DerefWrapper<T>(T);

    impl<T> Deref for DerefWrapper<T> {
        type Target = T;
        fn deref(&self) -> &Self::Target {
            &self.0
        }
    }

    impl UnlockedAccess<UnlockedELen> for Data {
        type Data = usize;
        type Guard<'l> = DerefWrapper<usize>;

        fn access(&self) -> Self::Guard<'_> {
            DerefWrapper(self.e.len())
        }
    }

    #[test]
    fn lock_a_then_c() {
        let data = Data::default();

        let mut w = Locked::new(&data);
        let (_a, mut wa) = w.lock_and::<A>();
        let (_c, _wc) = wa.lock_and::<C>();
        // This won't compile!
        // let _b = _wc.lock::<B>();
    }

    #[test]
    fn unlocked_access_does_not_prevent_locking() {
        let data = Data { a: Mutex::new(15), u: 34, ..Data::default() };

        let mut locked = Locked::new(&data);
        let u = locked.unlocked_access::<UnlockedUsize>();

        // Prove that `u` does not prevent locked state from being accessed.
        let a = locked.lock::<A>();
        assert_eq!(u, &34);
        assert_eq!(&*a, &15);
    }

    #[test]
    fn unlocked_access_with_does_not_prevent_locking() {
        let data = Data { a: Mutex::new(15), u: 34, ..Data::default() };
        let data = (data,);

        let mut locked = Locked::new(&data);
        let u = locked.unlocked_access_with::<UnlockedUsize, _>(|(data,)| data);

        // Prove that `u` does not prevent locked state from being accessed.
        let a = locked.lock_with::<A, _>(|(data,)| data);
        assert_eq!(u, &34);
        assert_eq!(&*a, &15);
    }

    /// Demonstrate how [`Locked::cast_with`] can be used to index into a `Vec`.
    #[test]
    fn cast_with_for_indexing_into_sub_field_state() {
        let data = Data { e: (0..10).map(Mutex::new).collect(), ..Data::default() };

        let mut locked = Locked::new(&data);
        for i in 0..*locked.unlocked_access::<UnlockedELen>() {
            // Use cast_with to select an individual lock from the list.
            let mut locked_element = locked.cast_with(|data| &data.e[i]);
            let mut item = locked_element.lock::<E>();

            assert_eq!(*item, i);
            *item = i + 1;
        }
    }

    #[test]
    fn adopt() {
        let data_left = Data { a: Mutex::new(55), b: Mutex::new(11), ..Data::default() };
        let mut locked = Locked::new(&data_left);
        let data_right = Data { a: Mutex::new(66), b: Mutex::new(22), ..Data::default() };
        let mut locked = locked.adopt(&data_right);

        // Lock `a` on the left side, then `b` on the right side: the lock
        // level is shared across both adopted structures.
        let (guard_left, mut locked) = locked.lock_with_and::<A, Data>(|t| t.left());
        let guard_right = locked.lock_with::<B, Data>(|t| t.right());
        assert_eq!(*guard_left, 55);
        assert_eq!(*guard_right, 22);
    }
}