selinux/
access_vector_cache.rs

1// Copyright 2023 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::fifo_cache::FifoCache;
6use crate::policy::{AccessDecision, IoctlAccessDecision};
7use crate::sync::Mutex;
8use crate::{AbstractObjectClass, FsNodeClass, NullessByteStr, ObjectClass, SecurityId};
9use std::sync::atomic::{AtomicU64, Ordering};
10use std::sync::{Arc, Weak};
11
12pub use crate::fifo_cache::{CacheStats, HasCacheStats};
13
/// Interface used internally by the `SecurityServer` implementation to implement policy queries
/// such as looking up the set of permissions to grant, or the Security Context to apply to new
/// files, etc.
///
/// This trait allows layering of caching, delegation, and thread-safety between the policy-backed
/// calculations, and the caller-facing permission-check interface.
pub(super) trait Query {
    /// Computes the [`AccessDecision`] permitted to `source_sid` for accessing `target_sid`, an
    /// object of type `target_class`.
    fn compute_access_decision(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: AbstractObjectClass,
    ) -> AccessDecision;

    /// Returns the security identifier (SID) with which to label a new `fs_node_class` instance
    /// created by `source_sid` in a parent directory labeled `target_sid`, used when no more
    /// specific SID was returned by `compute_new_fs_node_sid_with_name()` based on the file's
    /// name.
    fn compute_new_fs_node_sid(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error>;

    /// Returns the security identifier (SID) with which to label a new `fs_node_class` instance of
    /// name `fs_node_name`, created by `source_sid` in a parent directory labeled `target_sid`.
    /// If no filename-transition rules exist for the specified `fs_node_name` then `None` is
    /// returned.
    fn compute_new_fs_node_sid_with_name(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
        fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId>;

    /// Computes the [`IoctlAccessDecision`] permitted to `source_sid` for accessing `target_sid`,
    /// an object of type `target_class`, for ioctls with high byte `ioctl_prefix`.
    fn compute_ioctl_access_decision(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: AbstractObjectClass,
        ioctl_prefix: u8,
    ) -> IoctlAccessDecision;
}
63
/// An interface for computing the rights permitted to a source accessing a target of a particular
/// SELinux object type. Mutable-receiver counterpart of [`Query`], used by thread-local caches
/// that mutate internal state on lookup.
pub trait QueryMut {
    /// Computes the [`AccessDecision`] permitted to `source_sid` for accessing `target_sid`, an
    /// object of type `target_class`.
    fn compute_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: AbstractObjectClass,
    ) -> AccessDecision;

    /// Returns the security identifier (SID) with which to label a new `fs_node_class` instance
    /// created by `source_sid` in a parent directory labeled `target_sid`, used when no more
    /// specific SID was returned by `compute_new_fs_node_sid_with_name()` based on the file's
    /// name.
    fn compute_new_fs_node_sid(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error>;

    /// Returns the security identifier (SID) with which to label a new `fs_node_class` instance of
    /// name `fs_node_name`, created by `source_sid` in a parent directory labeled `target_sid`.
    /// If no filename-transition rules exist for the specified `fs_node_name` then `None` is
    /// returned.
    fn compute_new_fs_node_sid_with_name(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
        fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId>;

    /// Computes the [`IoctlAccessDecision`] permitted to `source_sid` for accessing `target_sid`,
    /// an object of type `target_class`, for ioctls with high byte `ioctl_prefix`.
    fn compute_ioctl_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: AbstractObjectClass,
        ioctl_prefix: u8,
    ) -> IoctlAccessDecision;
}
109
110impl<Q: Query> QueryMut for Q {
111    fn compute_access_decision(
112        &mut self,
113        source_sid: SecurityId,
114        target_sid: SecurityId,
115        target_class: AbstractObjectClass,
116    ) -> AccessDecision {
117        (self as &dyn Query).compute_access_decision(source_sid, target_sid, target_class)
118    }
119
120    fn compute_new_fs_node_sid(
121        &mut self,
122        source_sid: SecurityId,
123        target_sid: SecurityId,
124        fs_node_class: FsNodeClass,
125    ) -> Result<SecurityId, anyhow::Error> {
126        (self as &dyn Query).compute_new_fs_node_sid(source_sid, target_sid, fs_node_class)
127    }
128
129    fn compute_new_fs_node_sid_with_name(
130        &mut self,
131        source_sid: SecurityId,
132        target_sid: SecurityId,
133        fs_node_class: FsNodeClass,
134        fs_node_name: NullessByteStr<'_>,
135    ) -> Option<SecurityId> {
136        (self as &dyn Query).compute_new_fs_node_sid_with_name(
137            source_sid,
138            target_sid,
139            fs_node_class,
140            fs_node_name,
141        )
142    }
143
144    fn compute_ioctl_access_decision(
145        &mut self,
146        source_sid: SecurityId,
147        target_sid: SecurityId,
148        target_class: AbstractObjectClass,
149        ioctl_prefix: u8,
150    ) -> IoctlAccessDecision {
151        (self as &dyn Query).compute_ioctl_access_decision(
152            source_sid,
153            target_sid,
154            target_class,
155            ioctl_prefix,
156        )
157    }
158}
159
/// An interface for emptying caches that store [`Query`] input/output pairs. This interface
/// requires implementers to update state via interior mutability.
pub(super) trait Reset {
    /// Removes all entries from this cache and any reset delegate caches encapsulated in this
    /// cache. Returns true only if the cache is still valid after reset.
    fn reset(&self) -> bool;
}

/// An interface for emptying caches that store [`Query`] input/output pairs.
/// Mutable-receiver counterpart of [`Reset`], for thread-local caches.
pub(super) trait ResetMut {
    /// Removes all entries from this cache and any reset delegate caches encapsulated in this
    /// cache. Returns true only if the cache is still valid after reset.
    fn reset(&mut self) -> bool;
}

/// Every interior-mutability [`Reset`] implementation trivially satisfies [`ResetMut`].
impl<R: Reset> ResetMut for R {
    fn reset(&mut self) -> bool {
        (self as &dyn Reset).reset()
    }
}

/// Interface for caches whose delegate (consulted on cache miss) can be swapped out.
pub(super) trait ProxyMut<D> {
    /// Replaces the current delegate with `delegate`, returning the previous delegate.
    fn set_delegate(&mut self, delegate: D) -> D;
}
184
/// A default implementation for [`Query`] that grants no permissions: access decisions are the
/// (empty) default, and ioctl decisions deny all extended permissions.
#[derive(Default)]
pub(super) struct DenyAll;

impl Query for DenyAll {
    /// Returns the default [`AccessDecision`], granting no permissions.
    fn compute_access_decision(
        &self,
        _source_sid: SecurityId,
        _target_sid: SecurityId,
        _target_class: AbstractObjectClass,
    ) -> AccessDecision {
        AccessDecision::default()
    }

    // New-node labeling queries are not expected to reach this default-deny stub.
    // NOTE(review): confirm no caller path can route labeling queries here before relying on
    // this panic being impossible.
    fn compute_new_fs_node_sid(
        &self,
        _source_sid: SecurityId,
        _target_sid: SecurityId,
        _fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error> {
        unreachable!()
    }

    // Filename-transition labeling queries are likewise not expected to reach this stub.
    fn compute_new_fs_node_sid_with_name(
        &self,
        _source_sid: SecurityId,
        _target_sid: SecurityId,
        _fs_node_class: FsNodeClass,
        _fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId> {
        unreachable!()
    }

    /// Denies every ioctl extended permission.
    fn compute_ioctl_access_decision(
        &self,
        _source_sid: SecurityId,
        _target_sid: SecurityId,
        _target_class: AbstractObjectClass,
        _ioctl_prefix: u8,
    ) -> IoctlAccessDecision {
        IoctlAccessDecision::DENY_ALL
    }
}

impl Reset for DenyAll {
    /// A no-op implementation: [`DenyAll`] has no state to reset and no delegates to notify
    /// when it is being treated as a cache to be reset.
    fn reset(&self) -> bool {
        true
    }
}
236
/// Cache key for access-decision queries: the (source, target, class) triple.
#[derive(Clone, Hash, PartialEq, Eq)]
struct AccessQueryArgs {
    source_sid: SecurityId,
    target_sid: SecurityId,
    target_class: AbstractObjectClass,
}

/// Cached result of an access-decision query.
#[derive(Clone)]
struct AccessQueryResult {
    access_decision: AccessDecision,
    // Lazily computed by `compute_new_fs_node_sid()`; `None` until first requested for this key.
    new_file_sid: Option<SecurityId>,
}

/// Cache key for ioctl access-decision queries: the access-query triple plus the ioctl prefix
/// (high byte of the ioctl number).
#[derive(Clone, Hash, PartialEq, Eq)]
struct IoctlAccessQueryArgs {
    source_sid: SecurityId,
    target_sid: SecurityId,
    target_class: AbstractObjectClass,
    ioctl_prefix: u8,
}
257
/// An empty access vector cache that stores nothing and delegates all queries to a [`QueryMut`]
/// delegate.
#[derive(Default)]
struct Empty<D = DenyAll> {
    // Consulted for every query, since nothing is ever cached here.
    delegate: D,
}

impl<D> Empty<D> {
    /// Constructs an empty access vector cache that delegates to `delegate`.
    ///
    /// TODO: Eliminate `dead_code` guard.
    #[allow(dead_code)]
    pub fn new(delegate: D) -> Self {
        Self { delegate }
    }
}
273
274impl<D: QueryMut> QueryMut for Empty<D> {
275    fn compute_access_decision(
276        &mut self,
277        source_sid: SecurityId,
278        target_sid: SecurityId,
279        target_class: AbstractObjectClass,
280    ) -> AccessDecision {
281        self.delegate.compute_access_decision(source_sid, target_sid, target_class)
282    }
283
284    fn compute_new_fs_node_sid(
285        &mut self,
286        _source_sid: SecurityId,
287        _target_sid: SecurityId,
288        _fs_node_class: FsNodeClass,
289    ) -> Result<SecurityId, anyhow::Error> {
290        unreachable!()
291    }
292
293    fn compute_new_fs_node_sid_with_name(
294        &mut self,
295        _source_sid: SecurityId,
296        _target_sid: SecurityId,
297        _fs_node_class: FsNodeClass,
298        _fs_node_name: NullessByteStr<'_>,
299    ) -> Option<SecurityId> {
300        unreachable!()
301    }
302
303    fn compute_ioctl_access_decision(
304        &mut self,
305        _source_sid: SecurityId,
306        _target_sid: SecurityId,
307        _target_class: AbstractObjectClass,
308        _ioctl_prefix: u8,
309    ) -> IoctlAccessDecision {
310        todo!()
311    }
312}
313
impl<D: ResetMut> ResetMut for Empty<D> {
    /// [`Empty`] holds no entries of its own, so reset simply propagates to the delegate.
    fn reset(&mut self) -> bool {
        self.delegate.reset()
    }
}
319
/// Thread-hostile associative cache with capacity defined at construction and FIFO eviction.
pub(super) struct FifoQueryCache<D = DenyAll> {
    // Access decisions (and lazily-computed new-file SIDs), keyed by (source, target, class).
    access_cache: FifoCache<AccessQueryArgs, AccessQueryResult>,
    // Separate, smaller partition for ioctl decisions, keyed additionally by ioctl prefix.
    ioctl_access_cache: FifoCache<IoctlAccessQueryArgs, IoctlAccessDecision>,
    // Consulted on cache misses.
    delegate: D,
}
326
327impl<D> FifoQueryCache<D> {
328    // The multiplier used to compute the ioctl access cache capacity from the main cache capacity.
329    const IOCTL_CAPACITY_MULTIPLIER: f32 = 0.25;
330
331    /// Constructs a fixed-size access vector cache that delegates to `delegate`.
332    ///
333    /// # Panics
334    ///
335    /// This will panic if called with a `capacity` of zero.
336    pub fn new(delegate: D, capacity: usize) -> Self {
337        assert!(capacity > 0, "cannot instantiate fixed access vector cache of size 0");
338        let ioctl_access_cache_capacity =
339            (Self::IOCTL_CAPACITY_MULTIPLIER * (capacity as f32)) as usize;
340        assert!(
341            ioctl_access_cache_capacity > 0,
342            "cannot instantiate ioctl cache partition of size 0"
343        );
344
345        Self {
346            // Request `capacity` plus one element working-space for insertions that trigger
347            // an eviction.
348            access_cache: FifoCache::with_capacity(capacity),
349            ioctl_access_cache: FifoCache::with_capacity(ioctl_access_cache_capacity),
350            delegate,
351        }
352    }
353
354    /// Returns true if the main access decision cache has reached capacity.
355    #[cfg(test)]
356    fn access_cache_is_full(&self) -> bool {
357        self.access_cache.is_full()
358    }
359
360    /// Returns true if the ioctl access decision cache has reached capacity.
361    #[cfg(test)]
362    fn ioctl_access_cache_is_full(&self) -> bool {
363        self.ioctl_access_cache.is_full()
364    }
365}
366
impl<D: QueryMut> QueryMut for FifoQueryCache<D> {
    /// Looks up the access decision in the FIFO cache, computing and caching it via the
    /// delegate on a miss.
    fn compute_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: AbstractObjectClass,
    ) -> AccessDecision {
        // `target_class` is cloned because it is both part of the cache key and consumed by the
        // delegate call on the miss path.
        let query_args =
            AccessQueryArgs { source_sid, target_sid, target_class: target_class.clone() };
        if let Some(result) = self.access_cache.get(&query_args) {
            return result.access_decision.clone();
        }

        let access_decision =
            self.delegate.compute_access_decision(source_sid, target_sid, target_class);

        // Cache the decision; `new_file_sid` is computed lazily by `compute_new_fs_node_sid()`.
        self.access_cache.insert(
            query_args,
            AccessQueryResult { access_decision: access_decision.clone(), new_file_sid: None },
        );

        access_decision
    }

    /// Returns the SID for a new file-system node, memoizing it in the `new_file_sid` slot of
    /// the cached access-decision entry for the same (source, target, class) key.
    fn compute_new_fs_node_sid(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error> {
        let target_class = AbstractObjectClass::System(ObjectClass::from(fs_node_class));

        let query_args =
            AccessQueryArgs { source_sid, target_sid, target_class: target_class.clone() };
        // Obtain a handle to the cached entry, creating it (with the delegate's access
        // decision) on a miss. NOTE(review): this relies on `FifoCache::get`/`insert` returning
        // a mutable handle to the stored entry, so the `new_file_sid` written below persists in
        // the cache — confirm against `FifoCache`'s API.
        let query_result = if let Some(result) = self.access_cache.get(&query_args) {
            result
        } else {
            let access_decision =
                self.delegate.compute_access_decision(source_sid, target_sid, target_class);
            self.access_cache
                .insert(query_args, AccessQueryResult { access_decision, new_file_sid: None })
        };

        if let Some(new_file_sid) = query_result.new_file_sid {
            Ok(new_file_sid)
        } else {
            // First request for this key: compute via the delegate; memoize only on success so
            // a transient failure does not poison the cache.
            let new_file_sid =
                self.delegate.compute_new_fs_node_sid(source_sid, target_sid, fs_node_class);
            if let Ok(new_file_sid) = new_file_sid {
                query_result.new_file_sid = Some(new_file_sid);
            }
            new_file_sid
        }
    }

    /// Filename-transition lookups are not cached here; the query passes straight through to
    /// the delegate.
    fn compute_new_fs_node_sid_with_name(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
        fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId> {
        self.delegate.compute_new_fs_node_sid_with_name(
            source_sid,
            target_sid,
            fs_node_class,
            fs_node_name,
        )
    }

    /// Looks up the ioctl access decision in the (smaller) ioctl cache partition, computing and
    /// caching it via the delegate on a miss.
    fn compute_ioctl_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: AbstractObjectClass,
        ioctl_prefix: u8,
    ) -> IoctlAccessDecision {
        let query_args = IoctlAccessQueryArgs {
            source_sid,
            target_sid,
            target_class: target_class.clone(),
            ioctl_prefix,
        };
        if let Some(result) = self.ioctl_access_cache.get(&query_args) {
            return result.clone();
        }

        let ioctl_access_decision = self.delegate.compute_ioctl_access_decision(
            source_sid,
            target_sid,
            target_class,
            ioctl_prefix,
        );

        self.ioctl_access_cache.insert(query_args, ioctl_access_decision.clone());

        ioctl_access_decision
    }
}
466
467impl<D> HasCacheStats for FifoQueryCache<D> {
468    fn cache_stats(&self) -> CacheStats {
469        &self.access_cache.cache_stats() + &self.ioctl_access_cache.cache_stats()
470    }
471}
472
473impl<D> ResetMut for FifoQueryCache<D> {
474    fn reset(&mut self) -> bool {
475        self.access_cache = FifoCache::with_capacity(self.access_cache.capacity());
476        self.ioctl_access_cache = FifoCache::with_capacity(self.ioctl_access_cache.capacity());
477        true
478    }
479}
480
481impl<D> ProxyMut<D> for FifoQueryCache<D> {
482    fn set_delegate(&mut self, mut delegate: D) -> D {
483        std::mem::swap(&mut self.delegate, &mut delegate);
484        delegate
485    }
486}
487
/// A locked access vector cache.
///
/// Wraps a mutable-receiver cache in an `Arc<Mutex<..>>` so that it can be shared and queried
/// through the immutable-receiver [`Query`] interface.
pub(super) struct Locked<D = DenyAll> {
    delegate: Arc<Mutex<D>>,
}

impl<D> Clone for Locked<D> {
    // Cloning shares the same underlying cache; only the `Arc` reference count changes.
    fn clone(&self) -> Self {
        Self { delegate: self.delegate.clone() }
    }
}

impl<D> Locked<D> {
    /// Constructs a locked access vector cache that delegates to `delegate`.
    pub fn new(delegate: D) -> Self {
        Self { delegate: Arc::new(Mutex::new(delegate)) }
    }
}
505
506impl<D: QueryMut> Query for Locked<D> {
507    fn compute_access_decision(
508        &self,
509        source_sid: SecurityId,
510        target_sid: SecurityId,
511        target_class: AbstractObjectClass,
512    ) -> AccessDecision {
513        self.delegate.lock().compute_access_decision(source_sid, target_sid, target_class)
514    }
515
516    fn compute_new_fs_node_sid(
517        &self,
518        source_sid: SecurityId,
519        target_sid: SecurityId,
520        fs_node_class: FsNodeClass,
521    ) -> Result<SecurityId, anyhow::Error> {
522        self.delegate.lock().compute_new_fs_node_sid(source_sid, target_sid, fs_node_class)
523    }
524
525    fn compute_new_fs_node_sid_with_name(
526        &self,
527        source_sid: SecurityId,
528        target_sid: SecurityId,
529        fs_node_class: FsNodeClass,
530        fs_node_name: NullessByteStr<'_>,
531    ) -> Option<SecurityId> {
532        self.delegate.lock().compute_new_fs_node_sid_with_name(
533            source_sid,
534            target_sid,
535            fs_node_class,
536            fs_node_name,
537        )
538    }
539
540    fn compute_ioctl_access_decision(
541        &self,
542        source_sid: SecurityId,
543        target_sid: SecurityId,
544        target_class: AbstractObjectClass,
545        ioctl_prefix: u8,
546    ) -> IoctlAccessDecision {
547        self.delegate.lock().compute_ioctl_access_decision(
548            source_sid,
549            target_sid,
550            target_class,
551            ioctl_prefix,
552        )
553    }
554}
555
impl<D: HasCacheStats> HasCacheStats for Locked<D> {
    /// Returns the statistics of the delegate cache, read under the lock.
    fn cache_stats(&self) -> CacheStats {
        self.delegate.lock().cache_stats()
    }
}

impl<D: ResetMut> Reset for Locked<D> {
    /// Resets the delegate cache under the lock.
    fn reset(&self) -> bool {
        self.delegate.lock().reset()
    }
}

impl<D> Locked<D> {
    /// Replaces the delegate of the stateful cache held under the lock, returning the previous
    /// delegate.
    pub fn set_stateful_cache_delegate<PD>(&self, delegate: PD) -> PD
    where
        D: ProxyMut<PD>,
    {
        self.delegate.lock().set_delegate(delegate)
    }
}
576
/// A wrapper around an atomic integer that implements [`Reset`]. Instances of this type are used as
/// a version number to indicate when a cache needs to be emptied.
#[derive(Default)]
pub struct AtomicVersion(AtomicU64);

impl AtomicVersion {
    /// Atomically load the version number.
    // Relaxed ordering: the version is a standalone monotonic counter; it is not used to
    // synchronize access to any other memory.
    pub fn version(&self) -> u64 {
        self.0.load(Ordering::Relaxed)
    }

    /// Atomically increment the version number.
    pub fn increment_version(&self) {
        self.0.fetch_add(1, Ordering::Relaxed);
    }
}

impl Reset for AtomicVersion {
    /// "Resetting" a version bumps the counter; observers (e.g. [`ThreadLocalQuery`]) empty
    /// their caches lazily when they next notice the change.
    fn reset(&self) -> bool {
        self.increment_version();
        true
    }
}
600
601impl<Q: Query> Query for Arc<Q> {
602    fn compute_access_decision(
603        &self,
604        source_sid: SecurityId,
605        target_sid: SecurityId,
606        target_class: AbstractObjectClass,
607    ) -> AccessDecision {
608        self.as_ref().compute_access_decision(source_sid, target_sid, target_class)
609    }
610
611    fn compute_new_fs_node_sid(
612        &self,
613        source_sid: SecurityId,
614        target_sid: SecurityId,
615        fs_node_class: FsNodeClass,
616    ) -> Result<SecurityId, anyhow::Error> {
617        self.as_ref().compute_new_fs_node_sid(source_sid, target_sid, fs_node_class)
618    }
619
620    fn compute_new_fs_node_sid_with_name(
621        &self,
622        source_sid: SecurityId,
623        target_sid: SecurityId,
624        fs_node_class: FsNodeClass,
625        fs_node_name: NullessByteStr<'_>,
626    ) -> Option<SecurityId> {
627        self.as_ref().compute_new_fs_node_sid_with_name(
628            source_sid,
629            target_sid,
630            fs_node_class,
631            fs_node_name,
632        )
633    }
634
635    fn compute_ioctl_access_decision(
636        &self,
637        source_sid: SecurityId,
638        target_sid: SecurityId,
639        target_class: AbstractObjectClass,
640        ioctl_prefix: u8,
641    ) -> IoctlAccessDecision {
642        self.as_ref().compute_ioctl_access_decision(
643            source_sid,
644            target_sid,
645            target_class,
646            ioctl_prefix,
647        )
648    }
649}
650
651impl<R: Reset> Reset for Arc<R> {
652    fn reset(&self) -> bool {
653        self.as_ref().reset()
654    }
655}
656
657impl<Q: Query> Query for Weak<Q> {
658    fn compute_access_decision(
659        &self,
660        source_sid: SecurityId,
661        target_sid: SecurityId,
662        target_class: AbstractObjectClass,
663    ) -> AccessDecision {
664        self.upgrade()
665            .map(|q| q.compute_access_decision(source_sid, target_sid, target_class))
666            .unwrap_or_default()
667    }
668
669    fn compute_new_fs_node_sid(
670        &self,
671        source_sid: SecurityId,
672        target_sid: SecurityId,
673        fs_node_class: FsNodeClass,
674    ) -> Result<SecurityId, anyhow::Error> {
675        self.upgrade()
676            .map(|q| q.compute_new_fs_node_sid(source_sid, target_sid, fs_node_class))
677            .unwrap_or(Err(anyhow::anyhow!("weak reference failed to resolve")))
678    }
679
680    fn compute_new_fs_node_sid_with_name(
681        &self,
682        source_sid: SecurityId,
683        target_sid: SecurityId,
684        fs_node_class: FsNodeClass,
685        fs_node_name: NullessByteStr<'_>,
686    ) -> Option<SecurityId> {
687        let delegate = self.upgrade()?;
688        delegate.compute_new_fs_node_sid_with_name(
689            source_sid,
690            target_sid,
691            fs_node_class,
692            fs_node_name,
693        )
694    }
695
696    fn compute_ioctl_access_decision(
697        &self,
698        source_sid: SecurityId,
699        target_sid: SecurityId,
700        target_class: AbstractObjectClass,
701        ioctl_prefix: u8,
702    ) -> IoctlAccessDecision {
703        self.upgrade()
704            .map(|q| {
705                q.compute_ioctl_access_decision(source_sid, target_sid, target_class, ioctl_prefix)
706            })
707            .unwrap_or(IoctlAccessDecision::DENY_ALL)
708    }
709}
710
711impl<R: Reset> Reset for Weak<R> {
712    fn reset(&self) -> bool {
713        self.upgrade().as_deref().map(Reset::reset).unwrap_or(false)
714    }
715}
716
/// An access vector cache that may be reset from any thread, but expects to always be queried
/// from the same thread. The cache does not implement any specific caching strategies, but
/// delegates *all* operations.
///
/// Resets are delegated lazily during queries.  A `reset()` induces an internal state change that
/// results in at most one `reset()` call to the query delegate on the next query. This strategy
/// allows [`ThreadLocalQuery`] to expose thread-safe reset implementation over thread-hostile
/// access vector cache implementations.
pub(super) struct ThreadLocalQuery<D = DenyAll> {
    // The (thread-hostile) cache to which all queries are delegated.
    delegate: D,
    // The version this instance last observed; compared against `active_version` on queries.
    current_version: u64,
    // Shared version counter; bumping it (via `Reset`) requests a lazy cache invalidation.
    active_version: Arc<AtomicVersion>,
}

impl<D> ThreadLocalQuery<D> {
    /// Constructs a [`ThreadLocalQuery`] that delegates to `delegate`.
    pub fn new(active_version: Arc<AtomicVersion>, delegate: D) -> Self {
        // `current_version` starts at zero; if `active_version` is already ahead, the first
        // query triggers a (harmless) reset of the freshly-supplied delegate.
        Self { delegate, current_version: Default::default(), active_version }
    }
}
737
738impl<D: QueryMut + ResetMut> QueryMut for ThreadLocalQuery<D> {
739    fn compute_access_decision(
740        &mut self,
741        source_sid: SecurityId,
742        target_sid: SecurityId,
743        target_class: AbstractObjectClass,
744    ) -> AccessDecision {
745        let version = self.active_version.as_ref().version();
746        if self.current_version != version {
747            self.current_version = version;
748            self.delegate.reset();
749        }
750
751        // Allow `self.delegate` to implement caching strategy and prepare response.
752        self.delegate.compute_access_decision(source_sid, target_sid, target_class)
753    }
754
755    fn compute_new_fs_node_sid(
756        &mut self,
757        source_sid: SecurityId,
758        target_sid: SecurityId,
759        fs_node_class: FsNodeClass,
760    ) -> Result<SecurityId, anyhow::Error> {
761        let version = self.active_version.as_ref().version();
762        if self.current_version != version {
763            self.current_version = version;
764            self.delegate.reset();
765        }
766
767        // Allow `self.delegate` to implement caching strategy and prepare response.
768        self.delegate.compute_new_fs_node_sid(source_sid, target_sid, fs_node_class)
769    }
770
771    fn compute_new_fs_node_sid_with_name(
772        &mut self,
773        source_sid: SecurityId,
774        target_sid: SecurityId,
775        fs_node_class: FsNodeClass,
776        fs_node_name: NullessByteStr<'_>,
777    ) -> Option<SecurityId> {
778        // Allow `self.delegate` to implement caching strategy and prepare response.
779        self.delegate.compute_new_fs_node_sid_with_name(
780            source_sid,
781            target_sid,
782            fs_node_class,
783            fs_node_name,
784        )
785    }
786
787    fn compute_ioctl_access_decision(
788        &mut self,
789        source_sid: SecurityId,
790        target_sid: SecurityId,
791        target_class: AbstractObjectClass,
792        ioctl_prefix: u8,
793    ) -> IoctlAccessDecision {
794        self.delegate.compute_ioctl_access_decision(
795            source_sid,
796            target_sid,
797            target_class,
798            ioctl_prefix,
799        )
800    }
801}
802
/// Default size of an access vector cache shared by all threads in the system.
const DEFAULT_SHARED_SIZE: usize = 1000;

/// Default size of a thread-local access vector cache.
const DEFAULT_THREAD_LOCAL_SIZE: usize = 10;

/// Composite access vector cache manager that delegates queries to security server type, `SS`, and
/// owns a shared cache of size `DEFAULT_SHARED_SIZE`, and can produce thread-local caches of size
/// `DEFAULT_THREAD_LOCAL_SIZE`.
pub(super) struct Manager<SS> {
    // Process-wide cache shared by all thread-local caches; delegates to the security server.
    shared_cache: Locked<FifoQueryCache<Weak<SS>>>,
    // Version counter used to lazily reset all outstanding thread-local caches.
    thread_local_version: Arc<AtomicVersion>,
}
816
impl<SS> Manager<SS> {
    /// Constructs a [`Manager`] that initially has no security server delegate (i.e., will default
    /// to deny all requests).
    pub fn new() -> Self {
        Self {
            // `Weak::new()` never upgrades, so all queries are denied until a security server
            // delegate is installed via `set_security_server()`.
            shared_cache: Locked::new(FifoQueryCache::new(Weak::<SS>::new(), DEFAULT_SHARED_SIZE)),
            thread_local_version: Arc::new(AtomicVersion::default()),
        }
    }

    /// Sets the security server delegate that is consulted when there is no cache hit on a query.
    /// Returns the previously-installed delegate.
    pub fn set_security_server(&self, security_server: Weak<SS>) -> Weak<SS> {
        self.shared_cache.set_stateful_cache_delegate(security_server)
    }

    /// Returns a reference to the shared cache managed by this manager. Cloning the returned
    /// [`Locked`] shares the same underlying cache (an `Arc` reference-count update) rather than
    /// copying it.
    pub fn get_shared_cache(&self) -> &Locked<FifoQueryCache<Weak<SS>>> {
        &self.shared_cache
    }

    /// Constructs a new thread-local cache that will delegate to the shared cache managed by this
    /// manager (which, in turn, delegates to its security server).
    pub fn new_thread_local_cache(
        &self,
    ) -> ThreadLocalQuery<FifoQueryCache<Locked<FifoQueryCache<Weak<SS>>>>> {
        ThreadLocalQuery::new(
            self.thread_local_version.clone(),
            FifoQueryCache::new(self.shared_cache.clone(), DEFAULT_THREAD_LOCAL_SIZE),
        )
    }
}
849
impl<SS> Reset for Manager<SS> {
    /// Resets caches owned by this manager. If owned caches delegate to a security server that is
    /// reloading its policy, the security server must reload its policy (and start serving the new
    /// policy) *before* invoking `Manager::reset()` on any managers that delegate to that security
    /// server. This is because the [`Manager`]-managed caches are consulted by [`Query`] clients
    /// *before* the security server; performing reload/reset in the reverse order could move stale
    /// queries into reset caches before policy reload is complete.
    fn reset(&self) -> bool {
        // Stale entries in the layered caches are avoided only if the shared cache is reset
        // first, and the thread-local caches after it. Thread-local caches are consulted by
        // `Query` clients before the shared cache; resetting in the reverse order could let a
        // client repopulate a freshly-reset thread-local cache from a still-stale shared cache.
        self.shared_cache.reset();
        // Bumping the version makes each `ThreadLocalQuery` reset itself on its next query.
        self.thread_local_version.reset();
        true
    }
}
867
/// Test constants and helpers shared by `tests` and `starnix_tests`.
#[cfg(test)]
mod testing {
    use crate::SecurityId;

    use std::num::NonZeroU32;
    use std::sync::atomic::{AtomicU32, Ordering};
    use std::sync::LazyLock;

    /// SID to use where any value will do.
    pub(super) static A_TEST_SID: LazyLock<SecurityId> = LazyLock::new(unique_sid);

    /// Default fixed cache capacity to request in tests.
    pub(super) const TEST_CAPACITY: usize = 10;

    /// Returns a new `SecurityId` with unique id.
    pub(super) fn unique_sid() -> SecurityId {
        // Monotonic counter starting above the well-known SID range; never yields zero.
        static NEXT_ID: AtomicU32 = AtomicU32::new(1000);
        let raw_id = NEXT_ID.fetch_add(1, Ordering::AcqRel);
        SecurityId(NonZeroU32::new(raw_id).unwrap())
    }

    /// Returns a vector of `count` unique `SecurityIds`.
    pub(super) fn unique_sids(count: usize) -> Vec<SecurityId> {
        std::iter::repeat_with(unique_sid).take(count).collect()
    }
}
894
#[cfg(test)]
mod tests {
    use super::testing::*;
    use super::*;
    use crate::policy::{AccessVector, XpermsBitmap};
    use crate::ObjectClass;

    use std::sync::atomic::AtomicUsize;

    /// Test double that wraps a delegate [`Query`]/[`Reset`] implementation and counts
    /// the queries and resets that actually reach the delegate. This lets tests
    /// distinguish cache hits (delegate not consulted) from misses (delegate consulted).
    #[derive(Default)]
    struct Counter<D = DenyAll> {
        query_count: AtomicUsize,
        reset_count: AtomicUsize,
        delegate: D,
    }

    impl<D> Counter<D> {
        /// Returns the number of queries that have reached the delegate.
        fn query_count(&self) -> usize {
            self.query_count.load(Ordering::Relaxed)
        }

        /// Returns the number of resets that have reached the delegate.
        fn reset_count(&self) -> usize {
            self.reset_count.load(Ordering::Relaxed)
        }
    }

    impl<D: Query> Query for Counter<D> {
        fn compute_access_decision(
            &self,
            source_sid: SecurityId,
            target_sid: SecurityId,
            target_class: AbstractObjectClass,
        ) -> AccessDecision {
            self.query_count.fetch_add(1, Ordering::Relaxed);
            self.delegate.compute_access_decision(source_sid, target_sid, target_class)
        }

        fn compute_new_fs_node_sid(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _fs_node_class: FsNodeClass,
        ) -> Result<SecurityId, anyhow::Error> {
            // Not exercised by these tests.
            unreachable!()
        }

        fn compute_new_fs_node_sid_with_name(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _fs_node_class: FsNodeClass,
            _fs_node_name: NullessByteStr<'_>,
        ) -> Option<SecurityId> {
            // Not exercised by these tests.
            unreachable!()
        }

        fn compute_ioctl_access_decision(
            &self,
            source_sid: SecurityId,
            target_sid: SecurityId,
            target_class: AbstractObjectClass,
            ioctl_prefix: u8,
        ) -> IoctlAccessDecision {
            // ioctl queries are tallied into the same counter as access-decision queries.
            self.query_count.fetch_add(1, Ordering::Relaxed);
            self.delegate.compute_ioctl_access_decision(
                source_sid,
                target_sid,
                target_class,
                ioctl_prefix,
            )
        }
    }

    impl<D: Reset> Reset for Counter<D> {
        fn reset(&self) -> bool {
            self.reset_count.fetch_add(1, Ordering::Relaxed);
            self.delegate.reset();
            true
        }
    }

    #[test]
    fn empty_access_vector_cache_default_deny_all() {
        // An `Empty` cache backed by `DenyAll` must grant no permissions.
        let mut avc = Empty::<DenyAll>::default();
        assert_eq!(
            AccessVector::NONE,
            avc.compute_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                ObjectClass::Process.into()
            )
            .allow
        );
    }

    #[test]
    fn fixed_access_vector_cache_add_entry() {
        let mut avc = FifoQueryCache::<_>::new(Counter::<DenyAll>::default(), TEST_CAPACITY);
        assert_eq!(0, avc.delegate.query_count());
        // First query misses and is delegated.
        assert_eq!(
            AccessVector::NONE,
            avc.compute_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                ObjectClass::Process.into()
            )
            .allow
        );
        assert_eq!(1, avc.delegate.query_count());
        // Second identical query is served from the cache: delegate not re-consulted.
        assert_eq!(
            AccessVector::NONE,
            avc.compute_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                ObjectClass::Process.into()
            )
            .allow
        );
        assert_eq!(1, avc.delegate.query_count());
        assert_eq!(false, avc.access_cache_is_full());
    }

    #[test]
    fn fixed_access_vector_cache_reset() {
        let mut avc = FifoQueryCache::<_>::new(Counter::<DenyAll>::default(), TEST_CAPACITY);

        // Resetting an empty cache is a no-op.
        avc.reset();
        assert_eq!(false, avc.access_cache_is_full());

        assert_eq!(0, avc.delegate.query_count());
        assert_eq!(
            AccessVector::NONE,
            avc.compute_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                ObjectClass::Process.into()
            )
            .allow
        );
        assert_eq!(1, avc.delegate.query_count());
        assert_eq!(false, avc.access_cache_is_full());

        avc.reset();
        assert_eq!(false, avc.access_cache_is_full());
    }

    #[test]
    fn fixed_access_vector_cache_fill() {
        let mut avc = FifoQueryCache::<_>::new(Counter::<DenyAll>::default(), TEST_CAPACITY);

        // Fill the cache via unique source SIDs...
        for sid in unique_sids(avc.access_cache.capacity()) {
            avc.compute_access_decision(sid, A_TEST_SID.clone(), ObjectClass::Process.into());
        }
        assert_eq!(true, avc.access_cache_is_full());

        avc.reset();
        assert_eq!(false, avc.access_cache_is_full());

        // ...and again via unique target SIDs.
        for sid in unique_sids(avc.access_cache.capacity()) {
            avc.compute_access_decision(A_TEST_SID.clone(), sid, ObjectClass::Process.into());
        }
        assert_eq!(true, avc.access_cache_is_full());

        avc.reset();
        assert_eq!(false, avc.access_cache_is_full());
    }

    #[test]
    fn fixed_access_vector_cache_full_miss() {
        let mut avc = FifoQueryCache::<_>::new(Counter::<DenyAll>::default(), TEST_CAPACITY);

        // Make the test query, which will trivially miss.
        avc.compute_access_decision(
            A_TEST_SID.clone(),
            A_TEST_SID.clone(),
            ObjectClass::Process.into(),
        );
        assert!(!avc.access_cache_is_full());

        // Fill the cache with new queries, which should evict the test query.
        for sid in unique_sids(avc.access_cache.capacity()) {
            avc.compute_access_decision(sid, A_TEST_SID.clone(), ObjectClass::Process.into());
        }
        assert!(avc.access_cache_is_full());

        // Making the test query should result in another miss.
        let delegate_query_count = avc.delegate.query_count();
        avc.compute_access_decision(
            A_TEST_SID.clone(),
            A_TEST_SID.clone(),
            ObjectClass::Process.into(),
        );
        assert_eq!(delegate_query_count + 1, avc.delegate.query_count());

        // Because the cache is not LRU, making `capacity()` unique queries, each preceded by
        // the test query, will still result in the test query result being evicted.
        // Each test query will hit, and the interleaved queries will miss, with the final of the
        // interleaved queries evicting the test query.
        for sid in unique_sids(avc.access_cache.capacity()) {
            avc.compute_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                ObjectClass::Process.into(),
            );
            avc.compute_access_decision(sid, A_TEST_SID.clone(), ObjectClass::Process.into());
        }

        // The test query should now miss.
        let delegate_query_count = avc.delegate.query_count();
        avc.compute_access_decision(
            A_TEST_SID.clone(),
            A_TEST_SID.clone(),
            ObjectClass::Process.into(),
        );
        assert_eq!(delegate_query_count + 1, avc.delegate.query_count());
    }

    #[test]
    fn thread_local_query_access_vector_cache_reset() {
        let cache_version = Arc::new(AtomicVersion::default());
        let mut avc = ThreadLocalQuery::new(cache_version.clone(), Counter::<DenyAll>::default());

        // Reset deferred to next query: bumping the shared version does not reset the
        // thread-local delegate until the next query notices the version change.
        assert_eq!(0, avc.delegate.reset_count());
        cache_version.reset();
        assert_eq!(0, avc.delegate.reset_count());
        avc.compute_access_decision(
            A_TEST_SID.clone(),
            A_TEST_SID.clone(),
            ObjectClass::Process.into(),
        );
        assert_eq!(1, avc.delegate.reset_count());
    }

    #[test]
    fn access_vector_cache_ioctl_hit() {
        let mut avc = FifoQueryCache::<_>::new(Counter::<DenyAll>::default(), TEST_CAPACITY);
        assert_eq!(0, avc.delegate.query_count());
        assert_eq!(
            XpermsBitmap::NONE,
            avc.compute_ioctl_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                ObjectClass::Process.into(),
                0x0,
            )
            .allow
        );
        assert_eq!(1, avc.delegate.query_count());
        // The second request for the same key is a cache hit.
        assert_eq!(
            XpermsBitmap::NONE,
            avc.compute_ioctl_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                ObjectClass::Process.into(),
                0x0
            )
            .allow
        );
        assert_eq!(1, avc.delegate.query_count());
    }

    #[test]
    fn access_vector_cache_ioctl_miss() {
        let mut avc = FifoQueryCache::<_>::new(Counter::<DenyAll>::default(), TEST_CAPACITY);

        // Make the test query, which will trivially miss.
        avc.compute_ioctl_access_decision(
            A_TEST_SID.clone(),
            A_TEST_SID.clone(),
            ObjectClass::Process.into(),
            0x0,
        );

        // Fill the ioctl cache with new queries, which should evict the test query.
        // Prefixes 0x1..=capacity() are `capacity()` distinct keys, evicting prefix 0x0.
        for ioctl_prefix in 0x1..(1 + avc.ioctl_access_cache.capacity())
            .try_into()
            .expect("assumed that test ioctl cache capacity was < 255")
        {
            avc.compute_ioctl_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                ObjectClass::Process.into(),
                ioctl_prefix,
            );
        }
        // Make sure that we've fulfilled at least one new cache miss since the original test query,
        // and that the cache is now full.
        assert!(avc.delegate.query_count() > 1);
        assert!(avc.ioctl_access_cache_is_full());
        let delegate_query_count = avc.delegate.query_count();

        // Making the original test query again should result in another miss.
        avc.compute_ioctl_access_decision(
            A_TEST_SID.clone(),
            A_TEST_SID.clone(),
            ObjectClass::Process.into(),
            0x0,
        );
        assert_eq!(delegate_query_count + 1, avc.delegate.query_count());
    }
}
1198
1199/// Async tests that depend on `fuchsia::test` only run in starnix.
1200#[cfg(test)]
1201#[cfg(feature = "selinux_starnix")]
1202mod starnix_tests {
1203    use super::testing::*;
1204    use super::*;
1205    use crate::policy::testing::{ACCESS_VECTOR_0001, ACCESS_VECTOR_0010};
1206    use crate::policy::AccessVector;
1207    use crate::ObjectClass;
1208
1209    use rand::distributions::Uniform;
1210    use rand::{thread_rng, Rng as _};
1211    use std::collections::{HashMap, HashSet};
1212    use std::sync::atomic::AtomicU32;
1213    use std::thread::spawn;
1214
    /// Policy value under which no rights are granted.
    const NO_RIGHTS: u32 = 0;
    /// Policy value under which `ACCESS_VECTOR_READ` is granted.
    const READ_RIGHTS: u32 = 1;
    /// Policy value under which `ACCESS_VECTOR_WRITE` is granted.
    const WRITE_RIGHTS: u32 = 2;

    // Access decisions served for `READ_RIGHTS` and `WRITE_RIGHTS` respectively.
    const ACCESS_VECTOR_READ: AccessDecision = AccessDecision::allow(ACCESS_VECTOR_0001);
    const ACCESS_VECTOR_WRITE: AccessDecision = AccessDecision::allow(ACCESS_VECTOR_0010);
1221
    /// Fake policy source whose access decisions are derived from a shared atomic
    /// "policy" value (`NO_RIGHTS`/`READ_RIGHTS`/`WRITE_RIGHTS`), so tests can flip
    /// policy concurrently with queries.
    struct PolicyServer {
        policy: Arc<AtomicU32>,
    }
1225
1226    impl PolicyServer {
1227        fn set_policy(&self, policy: u32) {
1228            if policy > 2 {
1229                panic!("attempt to set policy to invalid value: {}", policy);
1230            }
1231            self.policy.as_ref().store(policy, Ordering::Relaxed);
1232        }
1233    }
1234
1235    impl Query for PolicyServer {
1236        fn compute_access_decision(
1237            &self,
1238            _source_sid: SecurityId,
1239            _target_sid: SecurityId,
1240            _target_class: AbstractObjectClass,
1241        ) -> AccessDecision {
1242            let policy = self.policy.as_ref().load(Ordering::Relaxed);
1243            if policy == NO_RIGHTS {
1244                AccessDecision::default()
1245            } else if policy == READ_RIGHTS {
1246                ACCESS_VECTOR_READ
1247            } else if policy == WRITE_RIGHTS {
1248                ACCESS_VECTOR_WRITE
1249            } else {
1250                panic!("compute_access_decision found invalid policy: {}", policy);
1251            }
1252        }
1253
1254        fn compute_new_fs_node_sid(
1255            &self,
1256            _source_sid: SecurityId,
1257            _target_sid: SecurityId,
1258            _fs_node_class: FsNodeClass,
1259        ) -> Result<SecurityId, anyhow::Error> {
1260            unreachable!()
1261        }
1262
1263        fn compute_new_fs_node_sid_with_name(
1264            &self,
1265            _source_sid: SecurityId,
1266            _target_sid: SecurityId,
1267            _fs_node_class: FsNodeClass,
1268            _fs_node_name: NullessByteStr<'_>,
1269        ) -> Option<SecurityId> {
1270            unreachable!()
1271        }
1272
1273        fn compute_ioctl_access_decision(
1274            &self,
1275            _source_sid: SecurityId,
1276            _target_sid: SecurityId,
1277            _target_class: AbstractObjectClass,
1278            _ioctl_prefix: u8,
1279        ) -> IoctlAccessDecision {
1280            todo!()
1281        }
1282    }
1283
    impl Reset for PolicyServer {
        fn reset(&self) -> bool {
            // The fake policy server holds no cached state, so reset trivially succeeds.
            true
        }
    }
1289
    #[fuchsia::test]
    async fn thread_local_query_access_vector_cache_coherence() {
        // Run the racy scenario several times to improve the odds of exercising
        // interesting thread interleavings.
        for _ in 0..TEST_CAPACITY {
            test_thread_local_query_access_vector_cache_coherence().await
        }
    }
1296
1297    /// Tests cache coherence over two policy changes over a [`ThreadLocalQuery`].
1298    async fn test_thread_local_query_access_vector_cache_coherence() {
1299        let active_policy: Arc<AtomicU32> = Arc::new(Default::default());
1300        let policy_server: Arc<PolicyServer> =
1301            Arc::new(PolicyServer { policy: active_policy.clone() });
1302        let cache_version = Arc::new(AtomicVersion::default());
1303
1304        let fixed_avc = FifoQueryCache::<_>::new(policy_server.clone(), TEST_CAPACITY);
1305        let cache_version_for_avc = cache_version.clone();
1306        let mut query_avc = ThreadLocalQuery::new(cache_version_for_avc, fixed_avc);
1307
1308        policy_server.set_policy(NO_RIGHTS);
1309        let (tx, rx) = futures::channel::oneshot::channel();
1310        let query_thread = spawn(move || {
1311            let mut trace = vec![];
1312
1313            for _ in 0..2000 {
1314                trace.push(query_avc.compute_access_decision(
1315                    A_TEST_SID.clone(),
1316                    A_TEST_SID.clone(),
1317                    ObjectClass::Process.into(),
1318                ))
1319            }
1320
1321            tx.send(trace).expect("send trace");
1322        });
1323
1324        let policy_server = PolicyServer { policy: active_policy.clone() };
1325        let cache_version_for_read = cache_version.clone();
1326        let set_read_thread = spawn(move || {
1327            std::thread::sleep(std::time::Duration::from_micros(1));
1328            policy_server.set_policy(READ_RIGHTS);
1329            cache_version_for_read.reset();
1330        });
1331
1332        let policy_server = PolicyServer { policy: active_policy.clone() };
1333        let cache_version_for_write = cache_version;
1334        let set_write_thread = spawn(move || {
1335            std::thread::sleep(std::time::Duration::from_micros(2));
1336            policy_server.set_policy(WRITE_RIGHTS);
1337            cache_version_for_write.reset();
1338        });
1339
1340        set_read_thread.join().expect("join set-policy-to-read");
1341        set_write_thread.join().expect("join set-policy-to-write");
1342        query_thread.join().expect("join query");
1343        let trace = rx.await.expect("receive trace");
1344        let mut observed_rights: HashSet<AccessVector> = Default::default();
1345        let mut prev_rights = AccessVector::NONE;
1346        for (i, rights) in trace.into_iter().enumerate() {
1347            if i != 0 && rights.allow != prev_rights {
1348                // Return-to-previous-rights => cache incoherence!
1349                assert!(!observed_rights.contains(&rights.allow));
1350                observed_rights.insert(rights.allow);
1351            }
1352
1353            prev_rights = rights.allow;
1354        }
1355    }
1356
    #[fuchsia::test]
    async fn locked_fixed_access_vector_cache_coherence() {
        // Run the racy scenario several times to improve the odds of exercising
        // interesting thread interleavings.
        for _ in 0..10 {
            test_locked_fixed_access_vector_cache_coherence().await
        }
    }
1363
    /// Tests cache coherence over two policy changes over a `Locked<Fixed>`.
    async fn test_locked_fixed_access_vector_cache_coherence() {
        //
        // Test setup
        //

        let active_policy: Arc<AtomicU32> = Arc::new(Default::default());
        let policy_server = Arc::new(PolicyServer { policy: active_policy.clone() });
        let fixed_avc = FifoQueryCache::<_>::new(policy_server.clone(), TEST_CAPACITY);
        let avc = Locked::new(fixed_avc);
        let sids = unique_sids(30);

        // Ensure the initial policy is `NO_RIGHTS`.
        policy_server.set_policy(NO_RIGHTS);

        //
        // Test run: Two threads will query the AVC many times while two other threads make policy
        // changes.
        //

        // Allow both query threads to synchronize on "last policy change has been made". Query
        // threads use this signal to ensure at least some of their queries occur after the last
        // policy change.
        let (tx_last_policy_change_1, rx_last_policy_change_1) =
            futures::channel::oneshot::channel();
        let (tx_last_policy_change_2, rx_last_policy_change_2) =
            futures::channel::oneshot::channel();

        // Set up two querying threads. The number of iterations in each thread is highly likely
        // to perform queries that overlap with the two policy changes, but to be sure, use
        // `rx_last_policy_change_#` to synchronize before last queries.
        let (tx1, rx1) = futures::channel::oneshot::channel();
        let avc_for_query_1 = avc.clone();
        let sids_for_query_1 = sids.clone();

        let query_thread_1 = spawn(|| async move {
            let sids = sids_for_query_1;
            let mut trace = vec![];

            for i in thread_rng().sample_iter(&Uniform::new(0, 20)).take(2000) {
                trace.push((
                    sids[i].clone(),
                    avc_for_query_1.compute_access_decision(
                        sids[i].clone(),
                        A_TEST_SID.clone(),
                        ObjectClass::Process.into(),
                    ),
                ))
            }

            rx_last_policy_change_1.await.expect("receive last-policy-change signal (1)");

            for i in thread_rng().sample_iter(&Uniform::new(0, 20)).take(10) {
                trace.push((
                    sids[i].clone(),
                    avc_for_query_1.compute_access_decision(
                        sids[i].clone(),
                        A_TEST_SID.clone(),
                        ObjectClass::Process.into(),
                    ),
                ))
            }

            tx1.send(trace).expect("send trace 1");

            //
            // Test expectations: After `<final-policy-reset>; avc.reset();
            // avc.compute_access_decision();`, all caches (including those that lazily reset on
            // next query) must contain *only* items consistent with the final policy:
            // `(_, _, _) => WRITE`.
            //

            for (_, result) in avc_for_query_1.delegate.lock().access_cache.iter() {
                assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
            }
        });

        let (tx2, rx2) = futures::channel::oneshot::channel();
        let avc_for_query_2 = avc.clone();
        let sids_for_query_2 = sids.clone();

        let query_thread_2 = spawn(|| async move {
            let sids = sids_for_query_2;
            let mut trace = vec![];

            for i in thread_rng().sample_iter(&Uniform::new(10, 30)).take(2000) {
                trace.push((
                    sids[i].clone(),
                    avc_for_query_2.compute_access_decision(
                        sids[i].clone(),
                        A_TEST_SID.clone(),
                        ObjectClass::Process.into(),
                    ),
                ))
            }

            rx_last_policy_change_2.await.expect("receive last-policy-change signal (2)");

            for i in thread_rng().sample_iter(&Uniform::new(10, 30)).take(10) {
                trace.push((
                    sids[i].clone(),
                    avc_for_query_2.compute_access_decision(
                        sids[i].clone(),
                        A_TEST_SID.clone(),
                        ObjectClass::Process.into(),
                    ),
                ))
            }

            tx2.send(trace).expect("send trace 2");

            //
            // Test expectations: After `<final-policy-reset>; avc.reset();
            // avc.compute_access_decision();`, all caches (including those that lazily reset on
            // next query) must contain *only* items consistent with the final policy:
            // `(_, _, _) => WRITE`.
            //

            for (_, result) in avc_for_query_2.delegate.lock().access_cache.iter() {
                assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
            }
        });

        let policy_server_for_set_read = policy_server.clone();
        let avc_for_set_read = avc.clone();
        let (tx_set_read, rx_set_read) = futures::channel::oneshot::channel();
        let set_read_thread = spawn(move || {
            // Allow some queries to accumulate before first policy change.
            std::thread::sleep(std::time::Duration::from_micros(1));

            // Set security server policy *first*, then reset caches. This is normally the
            // responsibility of the security server.
            policy_server_for_set_read.set_policy(READ_RIGHTS);
            avc_for_set_read.reset();

            tx_set_read.send(true).expect("send set-read signal")
        });

        let policy_server_for_set_write = policy_server.clone();
        let avc_for_set_write = avc;
        let set_write_thread = spawn(|| async move {
            // Complete set-read before executing set-write.
            // NOTE(review): this awaits the set-*read* completion signal; the expect
            // message ("set-write") is misleading.
            rx_set_read.await.expect("receive set-write signal");
            std::thread::sleep(std::time::Duration::from_micros(1));

            // Set security server policy *first*, then reset caches. This is normally the
            // responsibility of the security server.
            policy_server_for_set_write.set_policy(WRITE_RIGHTS);
            avc_for_set_write.reset();

            tx_last_policy_change_1.send(true).expect("send last-policy-change signal (1)");
            tx_last_policy_change_2.send(true).expect("send last-policy-change signal (2)");
        });

        // Join all threads.
        set_read_thread.join().expect("join set-policy-to-read");
        let _ = set_write_thread.join().expect("join set-policy-to-write").await;
        let _ = query_thread_1.join().expect("join query").await;
        let _ = query_thread_2.join().expect("join query").await;

        // Receive traces from query threads.
        let trace_1 = rx1.await.expect("receive trace 1");
        let trace_2 = rx2.await.expect("receive trace 2");

        //
        // Test expectations: Inspect individual query thread traces separately. For each thread,
        // group `(sid, 0, 0) -> AccessVector` trace items by `sid`, keeping them in chronological
        // order. Every such grouping should observe at most `NONE->READ`, `READ->WRITE`
        // transitions. Any other transitions suggests out-of-order "jitter" from stale cache items.
        //
        // We cannot expect stronger guarantees (e.g., across different queries). For example, the
        // following scheduling is possible:
        //
        // 1. Policy change thread changes policy from NONE to READ;
        // 2. Query thread qt queries q1, which has never been queried before. Result: READ.
        // 3. Query thread qt queries q0, which was cached before policy reload. Result: NONE.
        // 4. All caches reset.
        //
        // Notice that, ignoring query inputs, qt observes trace `..., READ, NONE`. However, such a
        // sequence must not occur when observing qt's trace filtered by query input (q1, q0, etc.).
        //

        for trace in [trace_1, trace_2] {
            let mut trace_by_sid = HashMap::<SecurityId, Vec<AccessVector>>::new();
            for (sid, access_decision) in trace {
                trace_by_sid.entry(sid).or_insert(vec![]).push(access_decision.allow);
            }
            for access_vectors in trace_by_sid.values() {
                let initial_rights = AccessVector::NONE;
                let mut prev_rights = &initial_rights;
                for rights in access_vectors.iter() {
                    // Note: `WRITE > READ > NONE`.
                    assert!(rights >= prev_rights);
                    prev_rights = rights;
                }
            }
        }
    }
1562
    /// Fake security server that owns a cache [`Manager`] and serves decisions derived
    /// from a shared atomic "policy" value, like [`PolicyServer`] but reachable through
    /// the manager's caches.
    struct SecurityServer {
        manager: Manager<SecurityServer>,
        policy: Arc<AtomicU32>,
    }
1567
    impl SecurityServer {
        /// Returns the cache [`Manager`] owned by this fake security server.
        fn manager(&self) -> &Manager<SecurityServer> {
            &self.manager
        }
    }
1573
1574    impl Query for SecurityServer {
1575        fn compute_access_decision(
1576            &self,
1577            _source_sid: SecurityId,
1578            _target_sid: SecurityId,
1579            _target_class: AbstractObjectClass,
1580        ) -> AccessDecision {
1581            let policy = self.policy.as_ref().load(Ordering::Relaxed);
1582            if policy == NO_RIGHTS {
1583                AccessDecision::default()
1584            } else if policy == READ_RIGHTS {
1585                ACCESS_VECTOR_READ
1586            } else if policy == WRITE_RIGHTS {
1587                ACCESS_VECTOR_WRITE
1588            } else {
1589                panic!("compute_access_decision found invalid policy: {}", policy);
1590            }
1591        }
1592
1593        fn compute_new_fs_node_sid(
1594            &self,
1595            _source_sid: SecurityId,
1596            _target_sid: SecurityId,
1597            _fs_node_class: FsNodeClass,
1598        ) -> Result<SecurityId, anyhow::Error> {
1599            unreachable!()
1600        }
1601
1602        fn compute_new_fs_node_sid_with_name(
1603            &self,
1604            _source_sid: SecurityId,
1605            _target_sid: SecurityId,
1606            _fs_node_class: FsNodeClass,
1607            _fs_node_name: NullessByteStr<'_>,
1608        ) -> Option<SecurityId> {
1609            unreachable!()
1610        }
1611
1612        fn compute_ioctl_access_decision(
1613            &self,
1614            _source_sid: SecurityId,
1615            _target_sid: SecurityId,
1616            _target_class: AbstractObjectClass,
1617            _ioctl_prefix: u8,
1618        ) -> IoctlAccessDecision {
1619            todo!()
1620        }
1621    }
1622
    impl Reset for SecurityServer {
        fn reset(&self) -> bool {
            // The fake security server holds no cached state of its own, so reset
            // trivially succeeds.
            true
        }
    }
1628
    #[fuchsia::test]
    async fn manager_cache_coherence() {
        // Run the racy scenario several times to improve the odds of exercising
        // interesting thread interleavings.
        for _ in 0..10 {
            test_manager_cache_coherence().await
        }
    }
1635
1636    /// Tests cache coherence over two policy changes over a `Locked<Fixed>`.
1637    async fn test_manager_cache_coherence() {
1638        //
1639        // Test setup
1640        //
1641
1642        let (active_policy, security_server) = {
1643            // Carefully initialize strong and weak references between security server and its cache
1644            // manager.
1645
1646            let manager = Manager::new();
1647
1648            // Initialize `security_server` to own `manager`.
1649            let active_policy: Arc<AtomicU32> = Arc::new(Default::default());
1650            let security_server =
1651                Arc::new(SecurityServer { manager, policy: active_policy.clone() });
1652
            // Replace `security_server.manager`'s empty `Weak` with `Weak<security_server>` to
            // start serving `security_server`'s policy out of `security_server.manager`'s cache.
1655            security_server
1656                .as_ref()
1657                .manager()
1658                .set_security_server(Arc::downgrade(&security_server));
1659
1660            (active_policy, security_server)
1661        };
1662        let sids = unique_sids(30);
1663
1664        fn set_policy(owner: &Arc<AtomicU32>, policy: u32) {
1665            if policy > 2 {
1666                panic!("attempt to set policy to invalid value: {}", policy);
1667            }
1668            owner.as_ref().store(policy, Ordering::Relaxed);
1669        }
1670
1671        // Ensure the initial policy is `NO_RIGHTS`.
1672        set_policy(&active_policy, NO_RIGHTS);
1673
1674        //
1675        // Test run: Two threads will query the AVC many times while two other threads make policy
1676        // changes.
1677        //
1678
1679        // Allow both query threads to synchronize on "last policy change has been made". Query
1680        // threads use this signal to ensure at least some of their queries occur after the last
1681        // policy change.
1682        let (tx_last_policy_change_1, rx_last_policy_change_1) =
1683            futures::channel::oneshot::channel();
1684        let (tx_last_policy_change_2, rx_last_policy_change_2) =
1685            futures::channel::oneshot::channel();
1686
1687        // Set up two querying threads. The number of iterations in each thread is highly likely
1688        // to perform queries that overlap with the two policy changes, but to be sure, use
1689        // `rx_last_policy_change_#` to synchronize  before last queries.
1690        let (tx1, rx1) = futures::channel::oneshot::channel();
1691        let mut avc_for_query_1 = security_server.manager().new_thread_local_cache();
1692        let sids_for_query_1 = sids.clone();
1693
1694        let query_thread_1 = spawn(|| async move {
1695            let sids = sids_for_query_1;
1696            let mut trace = vec![];
1697
1698            for i in thread_rng().sample_iter(&Uniform::new(0, 20)).take(2000) {
1699                trace.push((
1700                    sids[i].clone(),
1701                    avc_for_query_1.compute_access_decision(
1702                        sids[i].clone(),
1703                        A_TEST_SID.clone(),
1704                        ObjectClass::Process.into(),
1705                    ),
1706                ))
1707            }
1708
1709            rx_last_policy_change_1.await.expect("receive last-policy-change signal (1)");
1710
1711            for i in thread_rng().sample_iter(&Uniform::new(0, 20)).take(10) {
1712                trace.push((
1713                    sids[i].clone(),
1714                    avc_for_query_1.compute_access_decision(
1715                        sids[i].clone(),
1716                        A_TEST_SID.clone(),
1717                        ObjectClass::Process.into(),
1718                    ),
1719                ))
1720            }
1721
1722            tx1.send(trace).expect("send trace 1");
1723
1724            //
1725            // Test expectations: After `<final-policy-reset>; avc.reset();
1726            // avc.compute_access_decision();`, all caches (including those that lazily reset on
1727            // next query) must contain *only* items consistent with the final policy: `(_, _, ) =>
1728            // WRITE`.
1729            //
1730
1731            for (_, result) in avc_for_query_1.delegate.access_cache.iter() {
1732                assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
1733            }
1734        });
1735
1736        let (tx2, rx2) = futures::channel::oneshot::channel();
1737        let mut avc_for_query_2 = security_server.manager().new_thread_local_cache();
1738        let sids_for_query_2 = sids.clone();
1739
1740        let query_thread_2 = spawn(|| async move {
1741            let sids = sids_for_query_2;
1742            let mut trace = vec![];
1743
1744            for i in thread_rng().sample_iter(&Uniform::new(10, 30)).take(2000) {
1745                trace.push((
1746                    sids[i].clone(),
1747                    avc_for_query_2.compute_access_decision(
1748                        sids[i].clone(),
1749                        A_TEST_SID.clone(),
1750                        ObjectClass::Process.into(),
1751                    ),
1752                ))
1753            }
1754
1755            rx_last_policy_change_2.await.expect("receive last-policy-change signal (2)");
1756
1757            for i in thread_rng().sample_iter(&Uniform::new(10, 30)).take(10) {
1758                trace.push((
1759                    sids[i].clone(),
1760                    avc_for_query_2.compute_access_decision(
1761                        sids[i].clone(),
1762                        A_TEST_SID.clone(),
1763                        ObjectClass::Process.into(),
1764                    ),
1765                ))
1766            }
1767
1768            tx2.send(trace).expect("send trace 2");
1769
1770            //
1771            // Test expectations: After `<final-policy-reset>; avc.reset();
1772            // avc.compute_access_decision();`, all caches (including those that lazily reset on
1773            // next query) must contain *only* items consistent with the final policy: `(_, _, ) =>
1774            // WRITE`.
1775            //
1776
1777            for (_, result) in avc_for_query_2.delegate.access_cache.iter() {
1778                assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
1779            }
1780        });
1781
1782        // Set up two threads that will update the security policy *first*, then reset caches.
1783        // The threads synchronize to ensure a policy order of NONE->READ->WRITE.
1784        let active_policy_for_set_read = active_policy.clone();
1785        let security_server_for_set_read = security_server.clone();
1786        let (tx_set_read, rx_set_read) = futures::channel::oneshot::channel();
1787        let set_read_thread = spawn(move || {
1788            // Allow some queries to accumulate before first policy change.
1789            std::thread::sleep(std::time::Duration::from_micros(1));
1790
1791            // Set security server policy *first*, then reset caches. This is normally the
1792            // responsibility of the security server.
1793            set_policy(&active_policy_for_set_read, READ_RIGHTS);
1794            security_server_for_set_read.manager().reset();
1795
1796            tx_set_read.send(true).expect("send set-read signal")
1797        });
1798        let active_policy_for_set_write = active_policy.clone();
1799        let security_server_for_set_write = security_server.clone();
1800        let set_write_thread = spawn(|| async move {
1801            // Complete set-read before executing set-write.
1802            rx_set_read.await.expect("receive set-read signal");
1803            std::thread::sleep(std::time::Duration::from_micros(1));
1804
1805            // Set security server policy *first*, then reset caches. This is normally the
1806            // responsibility of the security server.
1807            set_policy(&active_policy_for_set_write, WRITE_RIGHTS);
1808            security_server_for_set_write.manager().reset();
1809
1810            tx_last_policy_change_1.send(true).expect("send last-policy-change signal (1)");
1811            tx_last_policy_change_2.send(true).expect("send last-policy-change signal (2)");
1812        });
1813
1814        // Join all threads.
1815        set_read_thread.join().expect("join set-policy-to-read");
1816        let _ = set_write_thread.join().expect("join set-policy-to-write").await;
1817        let _ = query_thread_1.join().expect("join query").await;
1818        let _ = query_thread_2.join().expect("join query").await;
1819
1820        // Receive traces from query threads.
1821        let trace_1 = rx1.await.expect("receive trace 1");
1822        let trace_2 = rx2.await.expect("receive trace 2");
1823
1824        //
1825        // Test expectations: Inspect individual query thread traces separately. For each thread,
1826        // group `(sid, 0, 0) -> AccessVector` trace items by `sid`, keeping them in chronological
1827        // order. Every such grouping should observe at most `NONE->READ`, `READ->WRITE`
        // transitions. Any other transition suggests out-of-order "jitter" from stale cache items.
1829        //
1830        // We cannot expect stronger guarantees (e.g., across different queries). For example, the
1831        // following scheduling is possible:
1832        //
1833        // 1. Policy change thread changes policy from NONE to READ;
        // 2. Query thread qt queries q1, which has never been queried before. Result: READ.
1835        // 3. Query thread qt queries q0, which was cached before policy reload. Result: NONE.
1836        // 4. All caches reset.
1837        //
1838        // Notice that, ignoring query inputs, qt observes `..., READ, NONE`. However, such a
1839        // sequence must not occur when observing qt's trace filtered by query input (q1, q0, etc.).
1840        //
1841        // Finally, the shared (`Locked`) cache should contain only entries consistent with
1842        // the final policy: `(_, _, ) => WRITE`.
1843        //
1844
1845        for trace in [trace_1, trace_2] {
1846            let mut trace_by_sid = HashMap::<SecurityId, Vec<AccessVector>>::new();
1847            for (sid, access_decision) in trace {
1848                trace_by_sid.entry(sid).or_insert(vec![]).push(access_decision.allow);
1849            }
1850            for access_vectors in trace_by_sid.values() {
1851                let initial_rights = AccessVector::NONE;
1852                let mut prev_rights = &initial_rights;
1853                for rights in access_vectors.iter() {
1854                    // Note: `WRITE > READ > NONE`.
1855                    assert!(rights >= prev_rights);
1856                    prev_rights = rights;
1857                }
1858            }
1859        }
1860
1861        let shared_cache = security_server.manager().shared_cache.delegate.lock();
1862        for (_, result) in shared_cache.access_cache.iter() {
1863            assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
1864        }
1865    }
1866}