1use fuchsia_sync::{MutexGuard, RwLockReadGuard, RwLockWriteGuard};
6use std::marker::PhantomData;
7
#[cfg(feature = "detect_lock_dep_cycles")]
mod tracking {
    //! Per-thread lock-dependency tracking, compiled in only when the
    //! `detect_lock_dep_cycles` feature is enabled.
    //!
    //! Each acquired lock is recorded on a thread-local stack as an
    //! `encoded_value`: the low 4 bits hold a "subclass" index (0-15) and
    //! the remaining bits hold the lock level. This assumes `L::LOCK_ID`
    //! keeps its low nibble clear — TODO confirm against the `LockLevel`
    //! definition.
    use std::cell::RefCell;

    /// One entry on the per-thread stack of currently-held locks.
    struct HeldLock {
        // Lock level in the high bits, subclass (0-15) in the low 4 bits.
        encoded_value: usize,
        // Number of live `SubclassToken`s granted while this lock was the
        // most recently acquired one; non-zero permits acquiring another
        // lock at the same level.
        active_subclass_tokens: usize,
        // Human-readable lock-level name, used only in panic messages.
        name: &'static str,
    }

    /// All tracking state for a single thread.
    struct ThreadState {
        // Stack of held locks in acquisition order (most recent last).
        held_locks: Vec<HeldLock>,
    }

    thread_local! {
        // Lock ordering is only meaningful per thread, so the stack is
        // thread-local and needs no synchronization of its own.
        static STATE: RefCell<ThreadState> = const { RefCell::new(ThreadState {
            held_locks: Vec::new(),
        }) };
    }

    /// Validates that acquiring the lock identified by `target_value` is
    /// consistent with the locks this thread already holds, then records
    /// it on the stack.
    ///
    /// Panics when:
    /// - the same encoded value is already at the top of the stack
    ///   (immediate self-deadlock);
    /// - the target's level is lower than the most recently acquired
    ///   lock's level (ordering violation);
    /// - the levels are equal but the most recent lock has no active
    ///   subclass token.
    #[inline(always)]
    fn check_and_push_lock(target_value: usize, name: &'static str) {
        STATE.with(|state| {
            let mut s = state.borrow_mut();
            if let Some(last) = s.held_locks.last() {
                let last_value = last.encoded_value;
                // Mask off the subclass nibble to compare levels only.
                let last_level = last_value & !0xF;
                let target_level = target_value & !0xF;

                if target_value == last_value {
                    panic!(
                        "LockDep: Self-deadlock detected on lock '{name}' (level {target_value})!"
                    );
                }
                if target_level < last_level {
                    panic!(
                        "Invalid lock ordering cycle detected: attempted to acquire '{name}' \
                        after '{}' ({target_level} < {last_level})!",
                        last.name
                    );
                }
                if target_level == last_level {
                    if last.active_subclass_tokens == 0 {
                        panic!(
                            "LockDep: Subclassing not allowed or already consumed for lock '{}'",
                            last.name
                        );
                    }
                }
            }
            s.held_locks.push(HeldLock {
                encoded_value: target_value,
                // Subclass tokens are granted later, via `allow_subclass`.
                active_subclass_tokens: 0,
                name,
            });
        });
    }

    /// Removes the most recent stack entry matching `target_value`.
    ///
    /// Panics if the value is not on the stack, or if the entry still has
    /// live subclass tokens (tokens must be dropped before their lock).
    #[inline(always)]
    fn pop_lock(target_value: usize) {
        STATE.with(|state| {
            let mut s = state.borrow_mut();
            // Search from the top: with subclassing the same level can
            // appear multiple times, and the most recent matching entry
            // is the one being released.
            let Some(pos) = s.held_locks.iter().rposition(|v| v.encoded_value == target_value)
            else {
                panic!(
                    "LockDep: Attempted to pop a tracked lock that was not tracked. \
                    Discrepancy detected. Target Lock : {target_value}"
                );
            };
            let lock = &s.held_locks[pos];
            if lock.active_subclass_tokens > 0 {
                // Render the whole stack ("value:tokens" pairs, hex) to
                // make the panic actionable.
                let stack_str = s
                    .held_locks
                    .iter()
                    .map(|v| format!("{:X}:{}", v.encoded_value, v.active_subclass_tokens))
                    .collect::<Vec<_>>()
                    .join(", ");
                panic!(
                    "LockDep: Attempted to drop a lock with active subclass tokens! \
                    Target: {:X}, tokens: {}, Stack: [{}]",
                    target_value, lock.active_subclass_tokens, stack_str
                );
            }
            // Guards may be dropped out of acquisition order, so remove
            // from the middle rather than only popping the top.
            s.held_locks.remove(pos);
        });
    }

    /// Test-only helper: wipes this thread's tracking stack so tests do
    /// not leak held-lock state into each other.
    #[cfg(test)]
    pub fn clear_state() {
        STATE.with(|state| state.borrow_mut().held_locks.clear());
    }

    /// Chooses the subclass index for a new acquisition at level `lock_id`.
    ///
    /// If the most recently acquired lock is at the same level and has an
    /// active subclass token, the new lock nests one subclass deeper
    /// (top's subclass + 1); otherwise subclass 0 is used.
    #[inline(always)]
    fn get_subclass(lock_id: usize) -> u8 {
        STATE.with(|state| {
            let s = state.borrow();
            if let Some(last) = s.held_locks.last() {
                let last_lock_id = last.encoded_value & !0xF;
                if last_lock_id == lock_id && last.active_subclass_tokens > 0 {
                    // May yield 16 when the top is already at subclass 15;
                    // the caller's assert rejects that overflow.
                    return (last.encoded_value & 0xF) as u8 + 1;
                }
            }
            0
        })
    }

    /// Grants one subclass token to the most recently acquired lock and
    /// returns that lock's encoded value (used later to revoke the token).
    /// Returns `usize::MAX` as a sentinel when no lock is held, which makes
    /// the token a no-op. NOTE(review): assumes no real lock encodes to
    /// `usize::MAX` — confirm against the lock-id range.
    #[inline(always)]
    fn enable_subclass_for_maximal() -> usize {
        STATE.with(|state| {
            let mut s = state.borrow_mut();
            if let Some(last) = s.held_locks.last_mut() {
                last.active_subclass_tokens += 1;
                last.encoded_value
            } else {
                usize::MAX
            }
        })
    }

    /// Revokes one subclass token previously granted by
    /// `enable_subclass_for_maximal` for the lock identified by
    /// `encoded_value`. The `usize::MAX` sentinel (token taken while no
    /// lock was held) is a no-op.
    ///
    /// Panics if the lock is no longer on the stack or has no active
    /// tokens — both indicate a bookkeeping bug.
    #[inline(always)]
    fn disable_subclass(encoded_value: usize) {
        if encoded_value == usize::MAX {
            return;
        }
        STATE.with(|state| {
            let mut s = state.borrow_mut();
            let Some(pos) = s.held_locks.iter().rposition(|v| v.encoded_value == encoded_value)
            else {
                panic!(
                    "LockDep: Attempted to disable subclass for a lock that is not on the stack! \
                    Value: {:X}",
                    encoded_value
                );
            };
            let lock = &mut s.held_locks[pos];
            if lock.active_subclass_tokens == 0 {
                panic!(
                    "LockDep: Attempted to disable subclass for a lock with no active tokens! \
                    Value: {:X}",
                    encoded_value
                );
            }
            lock.active_subclass_tokens -= 1;
        });
    }

    /// RAII record of a held lock level: construction performs the ordering
    /// check and pushes a stack entry; drop pops that entry.
    pub struct LockLevelToken<L> {
        // Encoded value pushed at construction; needed to find and remove
        // the matching stack entry on drop.
        target_value: usize,
        _level: std::marker::PhantomData<L>,
    }

    impl<L: crate::LockLevel> LockLevelToken<L> {
        /// Computes the subclass for this acquisition, validates lock
        /// ordering, and records the lock as held.
        ///
        /// Panics (via `check_and_push_lock`) on ordering violations, and
        /// asserts that at most 16 subclass nesting levels are in use.
        pub fn new() -> Self {
            let subclass = get_subclass(L::LOCK_ID);
            assert!(subclass < 16, "subclass must be between 0 and 15");
            // Low nibble carries the subclass; assumes `L::LOCK_ID` keeps
            // its low 4 bits clear — TODO confirm.
            let target_value = L::LOCK_ID | (subclass as usize & 0xF);
            check_and_push_lock(target_value, L::name());
            Self { target_value, _level: std::marker::PhantomData }
        }
    }

    impl<L> Drop for LockLevelToken<L> {
        fn drop(&mut self) {
            pop_lock(self.target_value);
        }
    }

    /// RAII permission to acquire another lock at the same level as the
    /// most recently acquired lock; the permission lasts until drop.
    pub struct SubclassToken {
        // Encoded value of the lock the token was granted on, or
        // `usize::MAX` if none was held (no-op token).
        encoded_value: usize,
    }

    impl SubclassToken {
        pub fn new() -> Self {
            let encoded_value = enable_subclass_for_maximal();
            Self { encoded_value }
        }
    }

    impl Drop for SubclassToken {
        fn drop(&mut self) {
            disable_subclass(self.encoded_value);
        }
    }
}
216
217#[cfg(not(feature = "detect_lock_dep_cycles"))]
218mod tracking {
219 pub struct LockLevelToken<L> {
220 _level: std::marker::PhantomData<L>,
221 }
222
223 impl<L: crate::LockLevel> LockLevelToken<L> {
224 #[inline(always)]
225 pub fn new() -> Self {
226 Self { _level: std::marker::PhantomData }
227 }
228 }
229
230 pub struct SubclassToken {}
231
232 impl SubclassToken {
233 #[inline(always)]
234 pub fn new() -> Self {
235 Self {}
236 }
237 }
238}
239
/// A [`fuchsia_sync::Mutex`] wrapper whose acquisitions are checked against
/// the lock-ordering level `L` when the `detect_lock_dep_cycles` feature is
/// enabled.
pub struct LockDepMutex<T, L> {
    inner: fuchsia_sync::Mutex<T>,
    // Zero-sized marker tying this mutex to its lock-ordering level.
    _level: PhantomData<L>,
}
245
246impl<T, L: crate::LockLevel> LockDepMutex<T, L> {
247 pub const fn new(value: T) -> Self {
248 Self { inner: fuchsia_sync::Mutex::new(value), _level: PhantomData }
249 }
250
251 #[inline(always)]
252 pub fn lock(&self) -> LockDepGuard<'_, T, L> {
253 let token = tracking::LockLevelToken::new();
254 LockDepGuard { inner: self.inner.lock(), _token: token }
255 }
256}
257
/// Guard returned by [`LockDepMutex::lock`]; dereferences to the protected
/// data and, on drop, releases both the mutex and the tracking entry.
pub struct LockDepGuard<'a, T, L> {
    inner: MutexGuard<'a, T>,
    // Keeps the lock recorded as held for the guard's lifetime.
    _token: tracking::LockLevelToken<L>,
}
262
263impl<'a, T, L> std::ops::Deref for LockDepGuard<'a, T, L> {
264 type Target = T;
265
266 fn deref(&self) -> &Self::Target {
267 &self.inner
268 }
269}
270
271impl<'a, T, L> std::ops::DerefMut for LockDepGuard<'a, T, L> {
272 fn deref_mut(&mut self) -> &mut Self::Target {
273 &mut self.inner
274 }
275}
276
/// A [`fuchsia_sync::RwLock`] wrapper whose acquisitions (read and write
/// alike) are checked against the lock-ordering level `L` when the
/// `detect_lock_dep_cycles` feature is enabled.
pub struct LockDepRwLock<T, L> {
    inner: fuchsia_sync::RwLock<T>,
    // Zero-sized marker tying this lock to its lock-ordering level.
    _level: PhantomData<L>,
}
282
283impl<T, L: crate::LockLevel> LockDepRwLock<T, L> {
284 pub const fn new(value: T) -> Self {
285 Self { inner: fuchsia_sync::RwLock::new(value), _level: PhantomData }
286 }
287
288 #[inline(always)]
289 pub fn read(&self) -> LockDepReadGuard<'_, T, L> {
290 let token = tracking::LockLevelToken::new();
291 LockDepReadGuard { inner: self.inner.read(), _token: token }
292 }
293
294 #[inline(always)]
295 pub fn write(&self) -> LockDepWriteGuard<'_, T, L> {
296 let token = tracking::LockLevelToken::new();
297 LockDepWriteGuard { inner: self.inner.write(), _token: token }
298 }
299}
300
/// Guard returned by [`LockDepRwLock::read`]; dereferences (immutably) to
/// the protected data and releases the read lock and tracking entry on drop.
pub struct LockDepReadGuard<'a, T, L> {
    inner: RwLockReadGuard<'a, T>,
    // Keeps the lock recorded as held for the guard's lifetime.
    _token: tracking::LockLevelToken<L>,
}
305
306impl<'a, T, L> std::ops::Deref for LockDepReadGuard<'a, T, L> {
307 type Target = T;
308
309 fn deref(&self) -> &Self::Target {
310 &self.inner
311 }
312}
313
/// Guard returned by [`LockDepRwLock::write`]; dereferences (mutably) to
/// the protected data and releases the write lock and tracking entry on drop.
pub struct LockDepWriteGuard<'a, T, L> {
    inner: RwLockWriteGuard<'a, T>,
    // Keeps the lock recorded as held for the guard's lifetime.
    _token: tracking::LockLevelToken<L>,
}
318
319impl<'a, T, L> std::ops::Deref for LockDepWriteGuard<'a, T, L> {
320 type Target = T;
321
322 fn deref(&self) -> &Self::Target {
323 &self.inner
324 }
325}
326
327impl<'a, T, L> std::ops::DerefMut for LockDepWriteGuard<'a, T, L> {
328 fn deref_mut(&mut self) -> &mut Self::Target {
329 &mut self.inner
330 }
331}
332
333pub fn allow_subclass() -> tracking::SubclassToken {
337 tracking::SubclassToken::new()
338}
339
340pub fn assert_lock_level<L: crate::LockLevel>() -> tracking::LockLevelToken<L> {
343 tracking::LockLevelToken::new()
344}
345
#[cfg(test)]
#[cfg(feature = "detect_lock_dep_cycles")]
mod tests {
    use super::*;
    use crate::{Unlocked, lock_ordering};

    // Ordering used throughout: LevelA must be acquired before LevelB.
    lock_ordering! {
        Unlocked => LevelA,
        LevelA => LevelB,
    }

    // Acquiring in increasing level order (A then B) must not panic.
    #[test]
    fn test_valid_lock_ordering() {
        tracking::clear_state();
        let lock_a: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
        let lock_b: LockDepMutex<i32, LevelB> = LockDepMutex::new(0);

        let _guard_a = lock_a.lock();
        let _guard_b = lock_b.lock();
    }

    // A subclass token taken while no lock is held is a harmless no-op.
    #[test]
    fn test_subclass_no_lock() {
        tracking::clear_state();
        let _token1 = allow_subclass();
    }

    // Each subclass token permits one more same-level acquisition;
    // chaining tokens allows nesting several LevelA locks.
    #[test]
    fn test_valid_lock_subclass_ordering() {
        tracking::clear_state();
        let lock_a1: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
        let lock_a2: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
        let lock_a3: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);

        let _guard_a1 = lock_a1.lock();
        let _token1 = allow_subclass();
        let _guard_a2 = lock_a2.lock();
        let _token2 = allow_subclass();
        let _guard_a3 = lock_a3.lock();
    }

    // Guards and tokens drop cleanly at the end of an inner scope.
    #[test]
    fn test_raii_subclass_guard() {
        tracking::clear_state();
        {
            let lock_a1: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
            let lock_a2: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);

            let _guard_a1 = lock_a1.lock();
            let _token = allow_subclass();
            let _guard_a2 = lock_a2.lock();
        }
    }

    // One token permits repeated same-level acquisitions while it lives:
    // it is not consumed by a single lock().
    #[test]
    fn test_subclass_guard_dropped_and_reacquired() {
        tracking::clear_state();
        {
            let lock_a1: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
            let lock_a2: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);

            let _guard_a1 = lock_a1.lock();
            let _token1 = allow_subclass();
            for _ in 0..2 {
                let _guard_a2 = lock_a2.lock();
            }
        }
    }

    // Tokens can be granted repeatedly at the same level.
    #[test]
    fn test_multiple_subclass_same_level() {
        tracking::clear_state();
        let lock_a1: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
        let lock_a2: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);

        let _guard_a1 = lock_a1.lock();
        let _token1 = allow_subclass();
        for _ in 0..2 {
            let _token2 = allow_subclass();
            let _guard_a2 = lock_a2.lock();
        }
    }

    // Without a token on the most recent lock, a second same-level
    // acquisition must panic.
    #[test]
    #[should_panic(expected = "Subclassing not allowed or already consumed")]
    fn test_raii_subclass_guard_limit() {
        tracking::clear_state();
        {
            let lock_a1: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
            let lock_a2: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
            let lock_a3: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);

            let _guard_a1 = lock_a1.lock();
            let _token = allow_subclass();
            let _guard_a2 = lock_a2.lock();

            let _guard_a3 = lock_a3.lock();
        }
    }

    // Deeper nesting works when each level of nesting has its own token.
    #[test]
    fn test_raii_subclass_guard_multiple() {
        tracking::clear_state();
        {
            let lock_a1: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
            let lock_a2: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
            let lock_a3: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);

            let _guard_a1 = lock_a1.lock();
            let _token1 = allow_subclass();
            let _guard_a2 = lock_a2.lock();

            let _token2 = allow_subclass();
            let _guard_a3 = lock_a3.lock();
        }
    }

    // Acquiring a lower level (A) after a higher one (B) must panic.
    #[test]
    #[should_panic(expected = "Invalid lock ordering cycle detected")]
    fn test_invalid_lock_ordering_cycle() {
        tracking::clear_state();
        {
            let lock_a: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
            let lock_b: LockDepMutex<i32, LevelB> = LockDepMutex::new(0);

            let _guard_b = lock_b.lock();
            let _guard_a = lock_a.lock();
        }
    }

    // Re-locking the exact same lock level on one thread must panic.
    #[test]
    #[should_panic(expected = "LockDep: Self-deadlock detected")]
    fn test_self_deadlock() {
        tracking::clear_state();
        {
            let lock_a: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);

            let _guard_a1 = lock_a.lock();
            let _guard_a2 = lock_a.lock();
        }
    }

    // Guards may be released out of acquisition order, and a subclassed
    // slot can be re-acquired afterwards.
    #[test]
    fn test_subclass_drop_out_of_order() {
        tracking::clear_state();
        let lock_a1: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
        let lock_a2: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
        let lock_a3: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);

        let _guard_a1 = lock_a1.lock();
        let _token1 = allow_subclass();
        let _guard_a2 = lock_a2.lock();
        let _token2 = allow_subclass();
        let _guard_a3 = lock_a3.lock();
        std::mem::drop(_token2);
        std::mem::drop(_guard_a2);
        std::mem::drop(_guard_a3);
        let _guard_a2 = lock_a2.lock();
    }

    // Dropping a guard while a subclass token granted on it is still
    // alive must panic.
    #[test]
    #[should_panic(expected = "LockDep: Attempted to drop a lock with active subclass tokens!")]
    fn test_drop_lock_with_active_tokens() {
        tracking::clear_state();
        let lock_a: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
        let guard = lock_a.lock();
        let _token = allow_subclass();
        std::mem::drop(guard);
    }

    // The ordering-violation panic names both lock levels involved.
    #[test]
    #[should_panic(
        expected = "Invalid lock ordering cycle detected: attempted to acquire 'LevelA' after 'LevelB'"
    )]
    fn test_panic_message_contains_names() {
        tracking::clear_state();
        let lock_a: LockDepMutex<i32, LevelA> = LockDepMutex::new(0);
        let lock_b: LockDepMutex<i32, LevelB> = LockDepMutex::new(0);

        let _guard_b = lock_b.lock();
        let _guard_a = lock_a.lock();
    }

    // assert_lock_level runs the same ordering check as a real lock.
    #[test]
    #[should_panic(expected = "Invalid lock ordering cycle detected")]
    fn test_assert_lock_level_panic() {
        tracking::clear_state();
        let lock_b: LockDepMutex<i32, LevelB> = LockDepMutex::new(0);

        let _guard_b = lock_b.lock();
        let _token = assert_lock_level::<LevelA>();
    }
}
541