// starnix_registers/lib.rs
// Copyright 2025 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#[cfg(target_arch = "aarch64")]
mod arm64;

#[cfg(target_arch = "aarch64")]
pub use arm64::*;

#[cfg(target_arch = "x86_64")]
mod x64;

#[cfg(target_arch = "x86_64")]
pub use x64::*;

#[cfg(target_arch = "riscv64")]
mod riscv64;

#[cfg(target_arch = "riscv64")]
pub use riscv64::*;

use starnix_logging::{CATEGORY_STARNIX, NAME_MAP_RESTRICTED_STATE, firehose_trace_duration};
use starnix_uapi::__static_assertions::assert_not_impl_any;
use std::ops::Deref;
use std::ptr::NonNull;

/// `RestrictedState` manages accesses into the restricted state VMO.
///
/// See `zx_restricted_bind_state`.
pub struct RestrictedState {
    /// Pointer into the mapping of the restricted-state VMO; written/read via
    /// `bind_and_map`/`read_exception` and unmapped again in `Drop`.
    pub bound_state: NonNull<zx::sys::zx_restricted_exception_t>,
    // Size in bytes of the mapping (the VMO size); needed for the unmap on drop.
    state_size: usize,
}

36impl RestrictedState {
37    /// Allocates a VMO for the restricted state and maps it into the Starnix kernel.
38    ///
39    /// The VMO is created using `zx_restricted_bind_state` and then mapped into the
40    /// kernel's address space. This allows Starnix to inspect and modify the
41    /// restricted state (registers) of the task.
42    pub fn bind_and_map(
43        register_state: &mut RegisterState<RegisterStorageEnum>,
44    ) -> Result<Self, zx::Status> {
45        firehose_trace_duration!(CATEGORY_STARNIX, NAME_MAP_RESTRICTED_STATE);
46        let mut out_vmo_handle = 0;
47        // SAFETY: `out_vmo_handle` is a valid pointer to a handle on the stack.
48        let status = zx::Status::from_raw(unsafe {
49            zx::sys::zx_restricted_bind_state(0, &mut out_vmo_handle)
50        });
51        match { status } {
52            zx::Status::OK => {
53                // We've successfully attached the VMO to the current thread. This VMO will be
54                // mapped and used for the kernel to store restricted mode register state as it
55                // enters and exits restricted mode.
56            }
57            _ => panic!("zx_restricted_bind_state failed with {status}!"),
58        }
59        // SAFETY: `out_vmo_handle` is a valid handle as `zx_restricted_bind_state` returned OK.
60        let state_vmo = unsafe { zx::Vmo::from(zx::NullableHandle::from_raw(out_vmo_handle)) };
61
62        // Name the VMO so external tools (like the CPU profiler) can capture
63        // this specific VMO.
64        let name = format!(
65            "restricted_state_vmo:{}",
66            fuchsia_runtime::with_thread_self(|t| t.koid())?.raw_koid()
67        );
68        let name = zx::Name::new(&name)?;
69        state_vmo.set_name(&name)?;
70
71        let state_size = state_vmo.get_size()? as usize;
72        if state_size < std::mem::size_of::<zx::sys::zx_restricted_exception_t>() {
73            return Err(zx::Status::INVALID_ARGS);
74        }
75
76        let state_address = fuchsia_runtime::vmar_root_self().map(
77            0,
78            &state_vmo,
79            0,
80            state_size,
81            zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
82        )?;
83
84        // This memory is not managed by Rust's stack, heap, etc. so treat it as "foreign" memory
85        // with no provenance.
86        let state_address: *mut zx::sys::zx_restricted_exception_t =
87            std::ptr::without_provenance_mut(state_address);
88        assert!(state_address.is_aligned(), "Zircon must map restricted-state-aligned memory");
89        let bound_state =
90            NonNull::new(state_address).expect("Zircon must map non-null restricted-state");
91
92        // Copy the initial register state into the mapped VMO and link the VMO to the register
93        // state of the current thread.
94        // SAFETY: `bound_state` is valid to read/write to as long as `RestrictedState` is live.
95        unsafe {
96            let vmo_ptr = std::ptr::addr_of_mut!((*bound_state.as_ptr()).state);
97            vmo_ptr.write(**register_state);
98            register_state.real_registers =
99                RegisterStorageEnum::Vmo(MappedVmoRegs(NonNull::new_unchecked(vmo_ptr)));
100        }
101
102        Ok(Self { state_size, bound_state })
103    }
104
105    pub fn read_exception(&self) -> zx::ExceptionReport {
106        // SAFETY: `bound_state` is valid to read from as long as `RestrictedState` is live.
107        let raw = unsafe { self.bound_state.read() };
108
109        // SAFETY: `raw` was written by Zircon during a restricted exit.
110        unsafe { zx::ExceptionReport::from_raw(raw.exception) }
111    }
112}
113
impl std::ops::Drop for RestrictedState {
    /// Unmaps the restricted-state VMO and unbinds it from the current thread.
    fn drop(&mut self) {
        let mapping_addr = self.bound_state.as_ptr() as usize;
        // SAFETY: We are un-mapping the state VMO. This is safe because we route all access
        // into this memory region through this struct so it is safe to unmap on Drop.
        // NOTE(review): the status returned by `zx_restricted_unbind_state` is ignored —
        // presumably it cannot fail for a thread that successfully bound; confirm against
        // the syscall documentation.
        unsafe {
            fuchsia_runtime::vmar_root_self()
                .unmap(mapping_addr, self.state_size)
                .expect("Failed to unmap");
            zx::sys::zx_restricted_unbind_state(0);
        }
    }
}

/// Marker trait for types that hold a `zx_restricted_state_t` and act as a
/// smart pointer to it: dereferenceable (read and write), comparable,
/// debuggable, and clonable.
pub trait RegisterStorage:
    std::ops::Deref<Target = zx::sys::zx_restricted_state_t>
    + std::ops::DerefMut
    + Eq
    + PartialEq
    + std::fmt::Debug
    + Clone
{
}

/// Register storage backed by the mapped restricted-state VMO.
///
/// Holds a raw pointer into the mapping owned by `RestrictedState`.
/// NOTE(review): the derived `Clone` copies the pointer (clones alias the same
/// mapping) and the derived `Eq`/`PartialEq` compare the pointer address, not the
/// register contents — confirm this is the intended semantics for `RegisterStorage`.
#[derive(Eq, PartialEq, Clone)]
struct MappedVmoRegs(NonNull<zx::sys::zx_restricted_state_t>);
// MappedVmoRegs should be tied to the CurrentTask, so it is not Send or Sync.
assert_not_impl_any!(MappedVmoRegs: Send, Sync);

143impl std::fmt::Debug for MappedVmoRegs {
144    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
145        f.debug_tuple("MappedVmoRegs").field(&format_args!("{:?}", self.deref())).finish()
146    }
147}
148
impl std::ops::Deref for MappedVmoRegs {
    type Target = zx::sys::zx_restricted_state_t;

    /// Borrows the register state stored in the mapped VMO.
    fn deref(&self) -> &Self::Target {
        // SAFETY: The pointer is valid and points to a valid `zx_restricted_state_t`.
        unsafe { self.0.as_ref() }
    }
}

impl std::ops::DerefMut for MappedVmoRegs {
    /// Mutably borrows the register state stored in the mapped VMO.
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: The pointer is valid and points to a valid `zx_restricted_state_t`.
        unsafe { self.0.as_mut() }
    }
}

// VMO-backed register storage satisfies the `RegisterStorage` marker trait.
impl RegisterStorage for MappedVmoRegs {}

/// Register storage backed by a heap allocation, used when no restricted-state
/// VMO is linked (see `RegisterStorageEnum`).
#[derive(Eq, PartialEq, Debug, Clone, Default)]
pub struct HeapRegs(Box<zx::sys::zx_restricted_state_t>);

170impl std::ops::Deref for HeapRegs {
171    type Target = zx::sys::zx_restricted_state_t;
172
173    fn deref(&self) -> &Self::Target {
174        &self.0
175    }
176}
177
178impl std::ops::DerefMut for HeapRegs {
179    fn deref_mut(&mut self) -> &mut Self::Target {
180        &mut self.0
181    }
182}
183
// Heap-backed register storage satisfies the `RegisterStorage` marker trait.
impl RegisterStorage for HeapRegs {}

186impl From<RegisterStorageEnum> for HeapRegs {
187    fn from(regs: RegisterStorageEnum) -> Self {
188        match regs {
189            RegisterStorageEnum::Vmo(vmo) => HeapRegs(Box::new(*vmo)),
190            RegisterStorageEnum::Heap(heap) => heap,
191        }
192    }
193}
194
/// An enum to hold the registers in either a heap or a vmo.
///
/// This is introduced to allow `CurrentTask` to store registers in heap during initialization and
/// link to the vmo after the `RestrictedState` is created.
#[derive(Eq, PartialEq, Debug)]
pub enum RegisterStorageEnum {
    /// Registers backed by the mapped restricted-state VMO.
    // Keep it private to prevent from using it directly.
    #[allow(private_interfaces)]
    Vmo(MappedVmoRegs),
    /// Registers backed by a heap allocation.
    Heap(HeapRegs),
}
assert_not_impl_any!(RegisterStorageEnum: Send, Sync);

208impl std::ops::Deref for RegisterStorageEnum {
209    type Target = zx::sys::zx_restricted_state_t;
210
211    fn deref(&self) -> &Self::Target {
212        match self {
213            RegisterStorageEnum::Vmo(vmo) => vmo.deref(),
214            RegisterStorageEnum::Heap(heap) => heap.deref(),
215        }
216    }
217}
218
219impl std::ops::DerefMut for RegisterStorageEnum {
220    fn deref_mut(&mut self) -> &mut Self::Target {
221        match self {
222            RegisterStorageEnum::Vmo(vmo) => vmo.deref_mut(),
223            RegisterStorageEnum::Heap(heap) => heap.deref_mut(),
224        }
225    }
226}
227
impl Clone for RegisterStorageEnum {
    /// Clones always produce heap-backed storage: the register contents are copied
    /// out (`**self` reads through `Deref`), so a clone of a `Vmo` variant does not
    /// alias the original thread's VMO mapping.
    fn clone(&self) -> RegisterStorageEnum {
        RegisterStorageEnum::Heap(HeapRegs(Box::new(**self)))
    }
}

// Either backing variant satisfies the `RegisterStorage` marker trait.
impl RegisterStorage for RegisterStorageEnum {}

236impl From<MappedVmoRegs> for RegisterStorageEnum {
237    fn from(regs: MappedVmoRegs) -> Self {
238        RegisterStorageEnum::Vmo(regs)
239    }
240}
241
242impl From<HeapRegs> for RegisterStorageEnum {
243    fn from(regs: HeapRegs) -> Self {
244        RegisterStorageEnum::Heap(regs)
245    }
246}