1#[cfg(target_arch = "aarch64")]
6mod arm64;
7
8#[cfg(target_arch = "aarch64")]
9pub use arm64::*;
10
11#[cfg(target_arch = "x86_64")]
12mod x64;
13
14#[cfg(target_arch = "x86_64")]
15pub use x64::*;
16
17#[cfg(target_arch = "riscv64")]
18mod riscv64;
19
20#[cfg(target_arch = "riscv64")]
21pub use riscv64::*;
22
23use starnix_logging::{CATEGORY_STARNIX, NAME_MAP_RESTRICTED_STATE, firehose_trace_duration};
24use starnix_uapi::__static_assertions::assert_not_impl_any;
25use std::ops::Deref;
26use std::ptr::NonNull;
27
/// Owns a thread's restricted-mode state: a mapping of the VMO returned by
/// `zx_restricted_bind_state`. The mapping is unmapped and the binding
/// released again when this is dropped.
pub struct RestrictedState {
    // Pointer to the mapped restricted-state VMO; valid for the lifetime of
    // this struct (the mapping is torn down in `drop`).
    pub bound_state: NonNull<zx::sys::zx_restricted_state_t>,
    // Size in bytes of the mapping (the VMO's size), needed to unmap it.
    state_size: usize,
}
35
36impl RestrictedState {
37 pub fn bind_and_map(
43 register_state: &mut RegisterState<RegisterStorageEnum>,
44 exception_report: &mut zx::sys::zx_exception_report_t,
45 ) -> Result<Self, zx::Status> {
46 firehose_trace_duration!(CATEGORY_STARNIX, NAME_MAP_RESTRICTED_STATE);
47 let mut out_vmo_handle = 0;
48 let status = zx::Status::from_raw(unsafe {
50 zx::sys::zx_restricted_bind_state(
51 0,
52 &mut out_vmo_handle,
53 std::ptr::from_mut(exception_report),
54 )
55 });
56 match { status } {
57 zx::Status::OK => {
58 }
62 _ => panic!("zx_restricted_bind_state failed with {status}!"),
63 }
64 let state_vmo = unsafe { zx::Vmo::from(zx::NullableHandle::from_raw(out_vmo_handle)) };
66
67 let name = format!(
70 "restricted_state_vmo:{}",
71 fuchsia_runtime::with_thread_self(|t| t.koid())?.raw_koid()
72 );
73 let name = zx::Name::new(&name)?;
74 state_vmo.set_name(&name)?;
75
76 let state_size = state_vmo.get_size()? as usize;
77 if state_size < std::mem::size_of::<zx::sys::zx_restricted_exception_t>() {
78 return Err(zx::Status::INVALID_ARGS);
79 }
80
81 let state_address = fuchsia_runtime::vmar_root_self().map(
82 0,
83 &state_vmo,
84 0,
85 state_size,
86 zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
87 )?;
88
89 let state_address: *mut zx::sys::zx_restricted_state_t =
92 std::ptr::without_provenance_mut(state_address);
93 assert!(state_address.is_aligned(), "Zircon must map restricted-state-aligned memory");
94 let bound_state =
95 NonNull::new(state_address).expect("Zircon must map non-null restricted-state");
96
97 unsafe {
101 let vmo_ptr = bound_state.as_ptr();
102 vmo_ptr.write(**register_state);
103 register_state.real_registers =
104 RegisterStorageEnum::Vmo(MappedVmoRegs(NonNull::new_unchecked(vmo_ptr)));
105 }
106
107 Ok(Self { state_size, bound_state })
108 }
109}
110
impl std::ops::Drop for RestrictedState {
    // Tears down the state created by `bind_and_map`: unmaps the VMO mapping
    // and releases the thread's restricted-state binding.
    fn drop(&mut self) {
        let mapping_addr = self.bound_state.as_ptr() as usize;
        // SAFETY: `bound_state` addresses a live mapping of `state_size`
        // bytes created by `bind_and_map`, and the pointer is never
        // dereferenced after the unmap.
        // NOTE(review): this appears to assume `drop` runs on the same thread
        // that called `bind_and_map`, since `zx_restricted_unbind_state`
        // operates on the calling thread — confirm.
        unsafe {
            fuchsia_runtime::vmar_root_self()
                .unmap(mapping_addr, self.state_size)
                .expect("Failed to unmap");
            zx::sys::zx_restricted_unbind_state(0);
        }
    }
}
124
/// Marker trait for containers of a `zx_restricted_state_t`: anything that
/// can be dereferenced to the register state (mutably and immutably),
/// compared, debug-printed, and cloned.
pub trait RegisterStorage:
    std::ops::Deref<Target = zx::sys::zx_restricted_state_t>
    + std::ops::DerefMut
    + Eq
    + PartialEq
    + std::fmt::Debug
    + Clone
{
}
134
// Register storage backed by the restricted-state VMO mapping owned by a
// `RestrictedState`.
//
// NOTE(review): the derived `PartialEq`/`Eq` compare the *pointer*, not the
// pointed-to register contents (unlike `HeapRegs`, which compares contents),
// and the derived `Clone` copies the pointer, aliasing the same mapping —
// confirm both are intended.
#[derive(Eq, PartialEq, Clone)]
struct MappedVmoRegs(NonNull<zx::sys::zx_restricted_state_t>);
// The inner pointer targets a thread-bound mapping, so this type must never
// cross threads.
assert_not_impl_any!(MappedVmoRegs: Send, Sync);
139
140impl std::fmt::Debug for MappedVmoRegs {
141 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
142 f.debug_tuple("MappedVmoRegs").field(&format_args!("{:?}", self.deref())).finish()
143 }
144}
145
impl std::ops::Deref for MappedVmoRegs {
    type Target = zx::sys::zx_restricted_state_t;

    fn deref(&self) -> &Self::Target {
        // SAFETY: the pointer was produced from the live mapping installed
        // by `bind_and_map`; assumes that mapping outlives this value —
        // TODO(review): confirm the `RestrictedState` owning the mapping
        // cannot be dropped while a `MappedVmoRegs` is still in use.
        unsafe { self.0.as_ref() }
    }
}
154
impl std::ops::DerefMut for MappedVmoRegs {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same mapping-liveness assumption as `deref`; exclusive
        // access follows from `&mut self` only if no aliasing clone exists —
        // TODO(review): confirm, since the derived `Clone` copies the pointer.
        unsafe { self.0.as_mut() }
    }
}
161
// `MappedVmoRegs` satisfies every `RegisterStorage` supertrait above.
impl RegisterStorage for MappedVmoRegs {}
163
/// Register storage owned on the heap, independent of any VMO mapping.
#[derive(Eq, PartialEq, Debug, Clone, Default)]
pub struct HeapRegs(Box<zx::sys::zx_restricted_state_t>);
166
167impl std::ops::Deref for HeapRegs {
168 type Target = zx::sys::zx_restricted_state_t;
169
170 fn deref(&self) -> &Self::Target {
171 &self.0
172 }
173}
174
175impl std::ops::DerefMut for HeapRegs {
176 fn deref_mut(&mut self) -> &mut Self::Target {
177 &mut self.0
178 }
179}
180
// `HeapRegs` satisfies every `RegisterStorage` supertrait above.
impl RegisterStorage for HeapRegs {}
182
183impl From<RegisterStorageEnum> for HeapRegs {
184 fn from(regs: RegisterStorageEnum) -> Self {
185 match regs {
186 RegisterStorageEnum::Vmo(vmo) => HeapRegs(Box::new(*vmo)),
187 RegisterStorageEnum::Heap(heap) => heap,
188 }
189 }
190}
191
/// Register storage that is either backed by the mapped restricted-state
/// VMO (`Vmo`) or owned on the heap (`Heap`).
#[derive(Eq, PartialEq, Debug)]
pub enum RegisterStorageEnum {
    // Registers living in the mapped restricted-state VMO.
    #[allow(private_interfaces)]
    Vmo(MappedVmoRegs),
    // Registers owned on the heap, detached from any mapping.
    Heap(HeapRegs),
}
// The `Vmo` variant holds a thread-bound mapping pointer, so the enum must
// never cross threads.
assert_not_impl_any!(RegisterStorageEnum: Send, Sync);
204
205impl std::ops::Deref for RegisterStorageEnum {
206 type Target = zx::sys::zx_restricted_state_t;
207
208 fn deref(&self) -> &Self::Target {
209 match self {
210 RegisterStorageEnum::Vmo(vmo) => vmo.deref(),
211 RegisterStorageEnum::Heap(heap) => heap.deref(),
212 }
213 }
214}
215
216impl std::ops::DerefMut for RegisterStorageEnum {
217 fn deref_mut(&mut self) -> &mut Self::Target {
218 match self {
219 RegisterStorageEnum::Vmo(vmo) => vmo.deref_mut(),
220 RegisterStorageEnum::Heap(heap) => heap.deref_mut(),
221 }
222 }
223}
224
225impl Clone for RegisterStorageEnum {
226 fn clone(&self) -> RegisterStorageEnum {
227 RegisterStorageEnum::Heap(HeapRegs(Box::new(**self)))
228 }
229}
230
// The enum delegates to whichever storage it wraps, so it satisfies every
// `RegisterStorage` supertrait.
impl RegisterStorage for RegisterStorageEnum {}
232
233impl From<MappedVmoRegs> for RegisterStorageEnum {
234 fn from(regs: MappedVmoRegs) -> Self {
235 RegisterStorageEnum::Vmo(regs)
236 }
237}
238
239impl From<HeapRegs> for RegisterStorageEnum {
240 fn from(regs: HeapRegs) -> Self {
241 RegisterStorageEnum::Heap(regs)
242 }
243}