1#[cfg(target_arch = "aarch64")]
6mod arm64;
7
8#[cfg(target_arch = "aarch64")]
9pub use arm64::*;
10
11#[cfg(target_arch = "x86_64")]
12mod x64;
13
14#[cfg(target_arch = "x86_64")]
15pub use x64::*;
16
17#[cfg(target_arch = "riscv64")]
18mod riscv64;
19
20#[cfg(target_arch = "riscv64")]
21pub use riscv64::*;
22
23use starnix_logging::{CATEGORY_STARNIX, NAME_MAP_RESTRICTED_STATE, firehose_trace_duration};
24use starnix_uapi::__static_assertions::assert_not_impl_any;
25use std::ops::Deref;
26use std::ptr::NonNull;
27
/// Owns a thread's restricted-mode state: a kernel-provided state VMO that has been bound to
/// the calling thread and mapped into this process. The mapping is unmapped and the state
/// unbound when this is dropped.
pub struct RestrictedState {
    /// Non-null pointer to the mapped `zx_restricted_exception_t`; valid for the life of `self`
    /// (established in `bind_and_map`, torn down in `Drop`).
    pub bound_state: NonNull<zx::sys::zx_restricted_exception_t>,
    /// Size in bytes of the mapped region (the whole state VMO); needed to unmap in `Drop`.
    state_size: usize,
}
35
impl RestrictedState {
    /// Binds a Zircon restricted-mode state VMO to the calling thread, maps it into this
    /// process, and redirects `register_state` so its live registers are backed directly by
    /// the mapping.
    ///
    /// On success the returned `RestrictedState` owns both the mapping and the thread binding;
    /// both are released in `Drop`.
    ///
    /// # Errors
    ///
    /// Returns any `zx::Status` produced while naming, sizing, or mapping the state VMO, and
    /// `INVALID_ARGS` if the VMO is too small to hold a `zx_restricted_exception_t`.
    /// NOTE(review): on those early-error paths the earlier `zx_restricted_bind_state` is not
    /// explicitly undone here — presumably thread teardown / closing the VMO handle covers it;
    /// confirm against the zx_restricted_bind_state contract.
    ///
    /// # Panics
    ///
    /// Panics if `zx_restricted_bind_state` itself fails.
    pub fn bind_and_map(
        register_state: &mut RegisterState<RegisterStorageEnum>,
    ) -> Result<Self, zx::Status> {
        firehose_trace_duration!(CATEGORY_STARNIX, NAME_MAP_RESTRICTED_STATE);
        let mut out_vmo_handle = 0;
        // SAFETY: plain syscall; on success `out_vmo_handle` receives a new VMO handle that we
        // take ownership of below.
        let status = zx::Status::from_raw(unsafe {
            zx::sys::zx_restricted_bind_state(0, &mut out_vmo_handle)
        });
        match { status } {
            zx::Status::OK => {
            }
            // Failing to bind restricted state is unrecoverable for this thread, so panic
            // rather than surface a Result the caller cannot meaningfully handle.
            _ => panic!("zx_restricted_bind_state failed with {status}!"),
        }
        // SAFETY: `out_vmo_handle` is a freshly created handle we now own exactly once.
        let state_vmo = unsafe { zx::Vmo::from(zx::NullableHandle::from_raw(out_vmo_handle)) };

        // Name the VMO after this thread's koid to make it identifiable when debugging.
        let name = format!(
            "restricted_state_vmo:{}",
            fuchsia_runtime::with_thread_self(|t| t.koid())?.raw_koid()
        );
        let name = zx::Name::new(&name)?;
        state_vmo.set_name(&name)?;

        let state_size = state_vmo.get_size()? as usize;
        // The mapping must be at least large enough for the exception struct we read/write
        // through `bound_state`.
        if state_size < std::mem::size_of::<zx::sys::zx_restricted_exception_t>() {
            return Err(zx::Status::INVALID_ARGS);
        }

        // Map the entire state VMO read/write into our root VMAR.
        let state_address = fuchsia_runtime::vmar_root_self().map(
            0,
            &state_vmo,
            0,
            state_size,
            zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
        )?;

        let state_address: *mut zx::sys::zx_restricted_exception_t =
            std::ptr::without_provenance_mut(state_address);
        assert!(state_address.is_aligned(), "Zircon must map restricted-state-aligned memory");
        let bound_state =
            NonNull::new(state_address).expect("Zircon must map non-null restricted-state");

        // SAFETY: `bound_state` points at a live, writable mapping at least as large as
        // `zx_restricted_exception_t` (checked above), so projecting to `.state` and writing
        // through it is valid.
        unsafe {
            let vmo_ptr = std::ptr::addr_of_mut!((*bound_state.as_ptr()).state);
            // Seed the mapped state with the caller's current register values, then switch the
            // caller's register storage to point directly into the mapping so further register
            // accesses go through the VMO.
            vmo_ptr.write(**register_state);
            register_state.real_registers =
                RegisterStorageEnum::Vmo(MappedVmoRegs(NonNull::new_unchecked(vmo_ptr)));
        }

        Ok(Self { state_size, bound_state })
    }

    /// Copies the exception report out of the mapped restricted state.
    pub fn read_exception(&self) -> zx::ExceptionReport {
        // SAFETY: `bound_state` is valid for reads of `zx_restricted_exception_t` for the life
        // of `self` (mapped in `bind_and_map`, unmapped only in `Drop`).
        let raw = unsafe { self.bound_state.read() };

        // SAFETY / NOTE(review): assumes the kernel has populated `raw.exception` with a valid
        // raw exception report before this is called — confirm with callers.
        unsafe { zx::ExceptionReport::from_raw(raw.exception) }
    }
}
113
impl std::ops::Drop for RestrictedState {
    /// Unmaps the restricted-state mapping, then unbinds the restricted state from the thread.
    fn drop(&mut self) {
        let mapping_addr = self.bound_state.as_ptr() as usize;
        // SAFETY: `mapping_addr`/`state_size` describe exactly the region mapped in
        // `bind_and_map`, and `self` owns it.
        // NOTE(review): any `MappedVmoRegs` still aliasing this mapping would dangle after the
        // unmap; callers are presumably expected to have switched register storage back to the
        // heap first — confirm.
        unsafe {
            fuchsia_runtime::vmar_root_self()
                .unmap(mapping_addr, self.state_size)
                .expect("Failed to unmap");
            zx::sys::zx_restricted_unbind_state(0);
        }
    }
}
127
/// Contract for the backing storage of a thread's restricted register state.
///
/// Implementors dereference to the raw `zx_restricted_state_t` regardless of whether the bytes
/// live in the mapped restricted-state VMO or on the heap, and support comparison, debugging,
/// and cloning of that state.
pub trait RegisterStorage:
    std::ops::Deref<Target = zx::sys::zx_restricted_state_t>
    + std::ops::DerefMut
    + Eq
    + PartialEq
    + std::fmt::Debug
    + Clone
{
}
137
/// Register storage that lives inside the thread's mapped restricted-state VMO.
///
/// Cloning copies the `NonNull` and therefore aliases the same mapping; the pointer is only
/// valid while the owning `RestrictedState` keeps the mapping alive.
#[derive(Eq, PartialEq, Clone)]
struct MappedVmoRegs(NonNull<zx::sys::zx_restricted_state_t>);
// The raw mapping pointer is only meaningful on the thread whose state is mapped here, so this
// type must never be sent or shared across threads.
assert_not_impl_any!(MappedVmoRegs: Send, Sync);
142
143impl std::fmt::Debug for MappedVmoRegs {
144 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
145 f.debug_tuple("MappedVmoRegs").field(&format_args!("{:?}", self.deref())).finish()
146 }
147}
148
impl std::ops::Deref for MappedVmoRegs {
    type Target = zx::sys::zx_restricted_state_t;

    fn deref(&self) -> &Self::Target {
        // SAFETY: the pointer was derived from the live restricted-state mapping; validity
        // relies on the owning `RestrictedState` outliving this value (enforced by usage, not
        // by the type system).
        unsafe { self.0.as_ref() }
    }
}
157
impl std::ops::DerefMut for MappedVmoRegs {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same validity argument as `deref`; `&mut self` gives us exclusive access to
        // this handle, though clones may alias the same mapping (see type docs).
        unsafe { self.0.as_mut() }
    }
}
164
// Marker impl: VMO-mapped registers satisfy the storage contract.
impl RegisterStorage for MappedVmoRegs {}
166
/// Register storage owned on the heap, used when the registers are not (or no longer) backed
/// by a mapped restricted-state VMO.
#[derive(Eq, PartialEq, Debug, Clone, Default)]
pub struct HeapRegs(Box<zx::sys::zx_restricted_state_t>);
169
170impl std::ops::Deref for HeapRegs {
171 type Target = zx::sys::zx_restricted_state_t;
172
173 fn deref(&self) -> &Self::Target {
174 &self.0
175 }
176}
177
178impl std::ops::DerefMut for HeapRegs {
179 fn deref_mut(&mut self) -> &mut Self::Target {
180 &mut self.0
181 }
182}
183
// Marker impl: heap-allocated registers satisfy the storage contract.
impl RegisterStorage for HeapRegs {}
185
186impl From<RegisterStorageEnum> for HeapRegs {
187 fn from(regs: RegisterStorageEnum) -> Self {
188 match regs {
189 RegisterStorageEnum::Vmo(vmo) => HeapRegs(Box::new(*vmo)),
190 RegisterStorageEnum::Heap(heap) => heap,
191 }
192 }
193}
194
/// Where a thread's restricted register state currently lives.
#[derive(Eq, PartialEq, Debug)]
pub enum RegisterStorageEnum {
    /// Registers backed by the thread's mapped restricted-state VMO.
    #[allow(private_interfaces)]
    Vmo(MappedVmoRegs),
    /// Registers owned on the heap.
    Heap(HeapRegs),
}
// The `Vmo` variant holds a thread-local mapping pointer, so the enum must stay on its thread.
assert_not_impl_any!(RegisterStorageEnum: Send, Sync);
207
208impl std::ops::Deref for RegisterStorageEnum {
209 type Target = zx::sys::zx_restricted_state_t;
210
211 fn deref(&self) -> &Self::Target {
212 match self {
213 RegisterStorageEnum::Vmo(vmo) => vmo.deref(),
214 RegisterStorageEnum::Heap(heap) => heap.deref(),
215 }
216 }
217}
218
219impl std::ops::DerefMut for RegisterStorageEnum {
220 fn deref_mut(&mut self) -> &mut Self::Target {
221 match self {
222 RegisterStorageEnum::Vmo(vmo) => vmo.deref_mut(),
223 RegisterStorageEnum::Heap(heap) => heap.deref_mut(),
224 }
225 }
226}
227
228impl Clone for RegisterStorageEnum {
229 fn clone(&self) -> RegisterStorageEnum {
230 RegisterStorageEnum::Heap(HeapRegs(Box::new(**self)))
231 }
232}
233
// Marker impl: the enum itself satisfies the storage contract, whichever variant it holds.
impl RegisterStorage for RegisterStorageEnum {}
235
236impl From<MappedVmoRegs> for RegisterStorageEnum {
237 fn from(regs: MappedVmoRegs) -> Self {
238 RegisterStorageEnum::Vmo(regs)
239 }
240}
241
242impl From<HeapRegs> for RegisterStorageEnum {
243 fn from(regs: HeapRegs) -> Self {
244 RegisterStorageEnum::Heap(regs)
245 }
246}