use crate::{AsHandleRef, HandleBased, HandleRef, NullableHandle, Status, ok, sys};
use bitflags::bitflags;

mod io_slice;
pub use self::io_slice::*;

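/// One endpoint of an IOB (I/O buffer) kernel object.
///
/// An IOB consists of one or more memory regions shared between two
/// endpoints; what each endpoint may do with a region is controlled by
/// [`IobAccess`] flags and an [`IobDiscipline`].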
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[repr(transparent)]
pub struct Iob(NullableHandle);
impl_handle_based!(Iob);

/// Options for [`Iob::create`]. No options are currently defined.
#[derive(Default)]
pub struct IobOptions;

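/// The type of an IOB region: either private to one IOB, or backed by a
/// shared region that can be attached to multiple IOBs.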
#[derive(Clone, Copy)]
pub enum IobRegionType<'a> {
    /// A region private to this IOB.
    Private { size: u64, options: IobRegionPrivateOptions },
    /// A region backed by a shared region created with
    /// `IobSharedRegion::create` (requires vdso next).
    Shared { options: vdso_next::IobRegionSharedOptions, region: &'a vdso_next::IobSharedRegion },
}

impl IobRegionType<'_> {
    fn to_raw(&self) -> (sys::zx_iob_region_type_t, sys::zx_iob_region_extension_t) {
        match self {
            IobRegionType::Private { .. } => (
                sys::ZX_IOB_REGION_TYPE_PRIVATE,
                sys::zx_iob_region_extension_t { private_region: Default::default() },
            ),
            IobRegionType::Shared { region, .. } => (
                sys::ZX_IOB_REGION_TYPE_SHARED,
                sys::zx_iob_region_extension_t {
                    shared_region: sys::zx_iob_region_shared_t {
                        options: 0,
                        shared_region: region.raw_handle(),
                        padding: Default::default(),
                    },
                },
            ),
        }
    }
}

#[derive(Clone, Copy, Default)]
pub struct IobRegionPrivateOptions;

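/// A description of one region passed to [`Iob::create`].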
pub struct IobRegion<'a> {
    pub region_type: IobRegionType<'a>,
    pub access: IobAccess,
    pub discipline: IobDiscipline,
}

bitflags! {
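    /// Access rights for an IOB region, specified separately for each
    /// endpoint: mapping the region for read or write, and performing
    /// kernel-mediated reads or writes.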
    #[repr(transparent)]
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct IobAccess: u32 {
        const EP0_CAN_MAP_READ = sys::ZX_IOB_ACCESS_EP0_CAN_MAP_READ;
        const EP0_CAN_MAP_WRITE = sys::ZX_IOB_ACCESS_EP0_CAN_MAP_WRITE;
        const EP0_CAN_MEDIATED_READ = sys::ZX_IOB_ACCESS_EP0_CAN_MEDIATED_READ;
        const EP0_CAN_MEDIATED_WRITE = sys::ZX_IOB_ACCESS_EP0_CAN_MEDIATED_WRITE;
        const EP1_CAN_MAP_READ = sys::ZX_IOB_ACCESS_EP1_CAN_MAP_READ;
        const EP1_CAN_MAP_WRITE = sys::ZX_IOB_ACCESS_EP1_CAN_MAP_WRITE;
        const EP1_CAN_MEDIATED_READ = sys::ZX_IOB_ACCESS_EP1_CAN_MEDIATED_READ;
        const EP1_CAN_MEDIATED_WRITE = sys::ZX_IOB_ACCESS_EP1_CAN_MEDIATED_WRITE;
    }
}

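/// The discipline of an IOB region, i.e. the protocol the kernel enforces
/// for mediated access to it.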
#[derive(Clone, Copy)]
pub enum IobDiscipline {
    /// No discipline: raw memory access only.
    None,
    /// A kernel-mediated write ring buffer
    /// (`ZX_IOB_DISCIPLINE_TYPE_MEDIATED_WRITE_RING_BUFFER`).
    MediatedWriteRingBuffer { tag: u64 },
}

impl IobDiscipline {
    fn to_raw(&self) -> sys::zx_iob_discipline_t {
        match self {
            IobDiscipline::None => sys::zx_iob_discipline_t {
                r#type: sys::ZX_IOB_DISCIPLINE_TYPE_NONE,
                extension: sys::zx_iob_discipline_extension_t {
                    reserved: [sys::PadByte::default(); 64],
                },
            },
            IobDiscipline::MediatedWriteRingBuffer { tag } => sys::zx_iob_discipline_t {
                r#type: sys::ZX_IOB_DISCIPLINE_TYPE_MEDIATED_WRITE_RING_BUFFER,
                extension: sys::zx_iob_discipline_extension_t {
                    ring_buffer: sys::zx_iob_discipline_mediated_write_ring_buffer_t {
                        tag: *tag,
                        padding: [sys::PadByte::default(); 56],
                    },
                },
            },
        }
    }
}

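/// Options for [`Iob::write`] and [`Iob::writev`]. No options are currently
/// defined.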
#[derive(Default)]
pub struct IobWriteOptions;

impl Iob {
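    /// Creates an IOB with the given `regions`, returning a handle for each
    /// of its two endpoints. Wraps the `zx_iob_create` syscall.
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming this crate is available under the name `zx`
    /// and a 4096-byte page size:
    ///
    /// ```no_run
    /// use zx::{Iob, IobAccess, IobDiscipline, IobRegion, IobRegionType};
    ///
    /// let (ep0, ep1) = Iob::create(
    ///     Default::default(),
    ///     &[IobRegion {
    ///         region_type: IobRegionType::Private { size: 4096, options: Default::default() },
    ///         access: IobAccess::EP0_CAN_MAP_READ
    ///             | IobAccess::EP0_CAN_MAP_WRITE
    ///             | IobAccess::EP1_CAN_MAP_READ,
    ///         discipline: IobDiscipline::None,
    ///     }],
    /// )
    /// .expect("failed to create IOB");
    /// ```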
    pub fn create(_options: IobOptions, regions: &[IobRegion<'_>]) -> Result<(Iob, Iob), Status> {
        let raw_regions: Vec<_> = regions
            .iter()
            .map(|r| {
                let (r#type, extension) = r.region_type.to_raw();
                sys::zx_iob_region_t {
                    r#type,
                    access: r.access.bits(),
                    size: match &r.region_type {
                        IobRegionType::Private { size, .. } => *size,
                        IobRegionType::Shared { .. } => 0,
                    },
                    discipline: r.discipline.to_raw(),
                    extension,
                }
            })
            .collect();
        let mut handle1 = 0;
        let mut handle2 = 0;
        // SAFETY: `raw_regions` points to `raw_regions.len()` valid regions, and
        // `handle1` and `handle2` are valid out-pointers for the endpoint handles.
        let status = unsafe {
            sys::zx_iob_create(
                0,
                raw_regions.as_ptr() as *const u8,
                raw_regions.len(),
                &mut handle1,
                &mut handle2,
            )
        };
        ok(status)?;
        // SAFETY: on success the kernel has transferred ownership of two new handles.
        unsafe {
            Ok((
                Iob::from(NullableHandle::from_raw(handle1)),
                Iob::from(NullableHandle::from_raw(handle2)),
            ))
        }
    }

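    /// Performs a mediated write of `data` to the region at `region_index`,
    /// which requires a discipline that supports mediated writes and the
    /// corresponding `CAN_MEDIATED_WRITE` access right for this endpoint.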
    pub fn write(
        &self,
        options: IobWriteOptions,
        region_index: u32,
        data: &[u8],
    ) -> Result<(), Status> {
        self.writev(options, region_index, &[IobIoSlice::new(data)])
    }

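    /// Performs a mediated write of a vector of slices to the region at
    /// `region_index`. Wraps the `zx_iob_writev` syscall.
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming this crate is available under the name `zx`:
    ///
    /// ```no_run
    /// use zx::{Iob, IobIoSlice};
    ///
    /// fn write_message(iob: &Iob) -> Result<(), zx::Status> {
    ///     let slices = [IobIoSlice::new(b"hello, "), IobIoSlice::new(b"world")];
    ///     iob.writev(Default::default(), 0, &slices)
    /// }
    /// ```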
    pub fn writev(
        &self,
        _options: IobWriteOptions,
        region_index: u32,
        iovecs: &[IobIoSlice<'_>],
    ) -> Result<(), Status> {
        // SAFETY: `IobIoSlice` is layout-compatible with `zx_iovec_t`, so the
        // cast pointer refers to `iovecs.len()` valid iovecs.
        let status = unsafe {
            sys::zx_iob_writev(
                self.raw_handle(),
                0,
                region_index,
                iovecs.as_ptr().cast::<sys::zx_iovec_t>(),
                iovecs.len(),
            )
        };
        ok(status)?;
        Ok(())
    }
}

pub(crate) mod vdso_next {
    use super::*;

    use std::sync::OnceLock;

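    /// A shared region that can be attached to multiple IOBs via
    /// [`IobRegionType::Shared`].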
    #[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
    #[repr(transparent)]
    pub struct IobSharedRegion(NullableHandle);
    impl_handle_based!(IobSharedRegion);

    /// Options for [`IobSharedRegion::create`]. No options are currently defined.
    #[derive(Default)]
    pub struct IobSharedRegionOptions;

    /// Options for [`IobRegionType::Shared`]. No options are currently defined.
    #[derive(Clone, Copy, Default)]
    pub struct IobRegionSharedOptions;

    impl IobSharedRegion {
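        /// Creates a shared region of `size` bytes.
        ///
        /// `zx_iob_create_shared_region` is only present in the vdso-next
        /// interface, so it is resolved dynamically at first use; this
        /// function panics if the symbol is not available.
        ///
        /// A minimal sketch of its use, assuming this crate is available
        /// under the name `zx` and re-exports this type at the crate root:
        ///
        /// ```no_run
        /// use zx::IobSharedRegion;
        ///
        /// let region = IobSharedRegion::create(Default::default(), 4096)
        ///     .expect("requires vdso next");
        /// ```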
        pub fn create(_options: IobSharedRegionOptions, size: u64) -> Result<Self, Status> {
            // `zx_iob_create_shared_region` is not in the stable vdso, so look
            // it up once at runtime rather than linking against it directly.
            static ZX_IOB_CREATE_SHARED_REGION_FN: OnceLock<
                unsafe extern "C" fn(u64, u64, *mut sys::zx_handle_t) -> sys::zx_status_t,
            > = OnceLock::new();

            let zx_iob_create_shared_region = ZX_IOB_CREATE_SHARED_REGION_FN.get_or_init(|| {
                // SAFETY: dlsym has no preconditions beyond a valid C string name.
                let symbol = unsafe {
                    libc::dlsym(libc::RTLD_DEFAULT, c"zx_iob_create_shared_region".as_ptr())
                };
                assert!(!symbol.is_null(), "zx_iob_create_shared_region requires vdso next");
                // SAFETY: if present, the symbol is the syscall stub with the
                // signature declared above.
                unsafe { std::mem::transmute(symbol) }
            });

            let mut handle = 0;
            // SAFETY: `handle` is a valid out-pointer for the new handle.
            let status = unsafe { zx_iob_create_shared_region(0, size, &mut handle) };
            ok(status)?;
            // SAFETY: on success the kernel has transferred ownership of a new handle.
            Ok(Self::from(unsafe { NullableHandle::from_raw(handle) }))
        }
    }

    #[cfg(all(test, vdso_next))]
    mod tests {
        use crate::handle::AsHandleRef;
        use crate::{
            Iob, IobAccess, IobDiscipline, IobRegion, IobRegionType, IobSharedRegion, Unowned,
            Vmar, VmarFlags, system_get_page_size,
        };
        use std::sync::atomic::{AtomicU64, Ordering};

        #[test]
        fn test() {
            let region_size = 2 * system_get_page_size() as usize;

            let shared_region =
                IobSharedRegion::create(Default::default(), region_size as u64).unwrap();

            let (ep0, ep1) = Iob::create(
                Default::default(),
                &[IobRegion {
                    region_type: IobRegionType::Shared {
                        options: Default::default(),
                        region: &shared_region,
                    },
                    access: IobAccess::EP0_CAN_MAP_READ
                        | IobAccess::EP0_CAN_MAP_WRITE
                        | IobAccess::EP1_CAN_MEDIATED_WRITE,
                    // A zero tag: the assertions below expect mediated writes to
                    // be tagged with the writing endpoint's koid.
                    discipline: IobDiscipline::MediatedWriteRingBuffer { tag: 0 },
                }],
            )
            .unwrap();

            ep1.write(Default::default(), 0, b"hello").unwrap();

            let vmar_handle = unsafe { fuchsia_runtime::zx_vmar_root_self() };
            let vmar = unsafe { Unowned::<Vmar>::from_raw_handle(vmar_handle) };
            let addr = vmar
                .map_iob(VmarFlags::PERM_READ | VmarFlags::PERM_WRITE, 0, &ep0, 0, 0, region_size)
                .unwrap();

            #[repr(C)]
            struct Header {
                head: AtomicU64,
                tail: AtomicU64,
            }

            let header = unsafe { &*(addr as *const Header) };

            let head = header.head.load(Ordering::Acquire);
            assert_eq!(head, 24);
            let tail = header.tail.load(Ordering::Relaxed);
            assert_eq!(tail, 0);

            #[repr(C)]
            struct Message {
                tag: u64,
                length: u64,
                data: [u8; 8],
            }

            let message =
                unsafe { &(*((addr + system_get_page_size() as usize) as *const Message)) };

            assert_eq!(message.tag, ep1.get_koid().unwrap().raw_koid());
            assert_eq!(message.length, 5);
            assert_eq!(&message.data[..5], b"hello");
        }
    }
}

#[cfg(test)]
mod tests {
    use super::{Iob, IobAccess, IobDiscipline, IobRegion, IobRegionType};
    use crate::{Unowned, Vmar, VmarFlags, system_get_page_size};
    use std::sync::atomic::{AtomicU64, Ordering};

    #[test]
    fn test_create_iob() {
        let region_size = system_get_page_size() as usize * 8;
        let (ep0, ep1) = Iob::create(
            Default::default(),
            &[IobRegion {
                region_type: IobRegionType::Private {
                    size: region_size as u64,
                    options: Default::default(),
                },
                access: IobAccess::EP0_CAN_MAP_READ
                    | IobAccess::EP0_CAN_MAP_WRITE
                    | IobAccess::EP1_CAN_MAP_READ,
                discipline: IobDiscipline::None,
            }],
        )
        .expect("create failed");

        // SAFETY: the root VMAR handle is valid for the life of the process.
        let root_vmar =
            unsafe { Unowned::<Vmar>::from_raw_handle(fuchsia_runtime::zx_vmar_root_self()) };

        let write_addr = root_vmar
            .map_iob(VmarFlags::PERM_READ | VmarFlags::PERM_WRITE, 0, &ep0, 0, 0, region_size)
            .expect("map_iob failed");
        let read_addr = root_vmar
            .map_iob(VmarFlags::PERM_READ, 0, &ep1, 0, 0, region_size)
            .expect("map_iob failed");

        const VALUE: u64 = 0x123456789abcdef;

        unsafe { &*(write_addr as *const AtomicU64) }.store(VALUE, Ordering::Relaxed);

        assert_eq!(unsafe { &*(read_addr as *const AtomicU64) }.load(Ordering::Relaxed), VALUE);

        unsafe {
            root_vmar.unmap(write_addr, region_size).expect("unmap failed");
            root_vmar.unmap(read_addr, region_size).expect("unmap failed");
        }
    }
}