use std::marker::PhantomData;
use std::ops::RangeBounds;
use zerocopy::{FromBytes, IntoBytes};

#[cfg(target_arch = "aarch64")]
mod arm64;

#[cfg(target_arch = "aarch64")]
use arm64 as arch;

#[cfg(target_arch = "x86_64")]
mod x64;

#[cfg(target_arch = "x86_64")]
use x64 as arch;

#[cfg(target_arch = "riscv64")]
mod riscv64;

#[cfg(target_arch = "riscv64")]
use riscv64 as arch;

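/// A raw pointer to a value of type `T` in memory that may be shared with an
/// eBPF program. The `'a` lifetime bounds how long the pointed-to memory is
/// assumed to remain valid; the pointer itself does not own the memory.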
pub struct EbpfPtr<'a, T> {
    ptr: *mut T,
    phantom: PhantomData<&'a T>,
}

#[allow(clippy::undocumented_unsafe_blocks, reason = "Force documented unsafe blocks in Starnix")]
unsafe impl<'a, T> Send for EbpfPtr<'a, T> {}
#[allow(clippy::undocumented_unsafe_blocks, reason = "Force documented unsafe blocks in Starnix")]
unsafe impl<'a, T> Sync for EbpfPtr<'a, T> {}

impl<'a, T> EbpfPtr<'a, T> {
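    /// # Safety
    ///
    /// The caller must guarantee that `ptr` is non-null, properly aligned,
    /// and valid for reads and writes for the whole lifetime `'a`.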
    pub unsafe fn new(ptr: *mut T) -> Self {
        Self { ptr, phantom: PhantomData }
    }

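    /// # Safety
    ///
    /// The caller must ensure that the value is not mutated by anything else
    /// while the returned reference is alive.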
    pub unsafe fn deref(&self) -> &'a T {
        #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
        unsafe {
            &*self.ptr
        }
    }

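    /// # Safety
    ///
    /// The caller must ensure that the returned reference is the only live
    /// reference to the value for the lifetime `'a`.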
    pub unsafe fn deref_mut(&self) -> &'a mut T {
        #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
        unsafe {
            &mut *self.ptr
        }
    }
}

impl EbpfPtr<'_, u64> {
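    /// Atomically loads the pointed-to `u64` with relaxed ordering.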
    pub fn load_relaxed(&self) -> u64 {
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        unsafe {
            arch::load_u64(self.ptr)
        }
    }

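    /// Atomically stores `value` to the pointed-to `u64` with relaxed
    /// ordering.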
    pub fn store_relaxed(&self, value: u64) {
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        unsafe {
            arch::store_u64(self.ptr, value)
        }
    }
}

impl EbpfPtr<'_, u32> {
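    /// Atomically loads the pointed-to `u32` with relaxed ordering.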
    pub fn load_relaxed(&self) -> u32 {
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        unsafe {
            arch::load_u32(self.ptr)
        }
    }

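    /// Atomically stores `value` to the pointed-to `u32` with relaxed
    /// ordering.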
    pub fn store_relaxed(&self, value: u32) {
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        unsafe {
            arch::store_u32(self.ptr, value)
        }
    }
}

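/// A fixed-size, 8-byte-aligned memory buffer that may be shared with an eBPF
/// program. The buffer is read and written as whole `u64` words using relaxed
/// atomics, so concurrent readers never observe a torn word.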
#[derive(Copy, Clone)]
pub struct EbpfBufferPtr<'a> {
    ptr: *mut u8,
    size: usize,
    phantom: PhantomData<&'a u8>,
}

impl<'a> EbpfBufferPtr<'a> {
    pub const ALIGNMENT: usize = size_of::<u64>();

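    /// # Safety
    ///
    /// The caller must guarantee that `ptr` points to at least `size` bytes
    /// that stay valid for reads and writes for the lifetime `'a`. Alignment
    /// and size requirements are checked by the asserts below.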
    pub unsafe fn new(ptr: *mut u8, size: usize) -> Self {
        assert!((ptr as usize) % Self::ALIGNMENT == 0);
        assert!(size % Self::ALIGNMENT == 0);
        assert!(size < isize::MAX as usize);
        Self { ptr, size, phantom: PhantomData }
    }

    pub fn len(&self) -> usize {
        self.size
    }

    pub fn raw_ptr(&self) -> *mut u8 {
        self.ptr
    }

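    /// # Safety
    ///
    /// `offset + size_of::<T>()` must not exceed `self.size`, so the
    /// resulting pointer stays within the buffer.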
    unsafe fn get_ptr_internal<T>(&self, offset: usize) -> EbpfPtr<'a, T> {
        #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
        unsafe {
            EbpfPtr::new(self.ptr.byte_offset(offset as isize) as *mut T)
        }
    }

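    /// Returns a typed pointer at `offset` bytes into the buffer, or `None`
    /// if a value of type `T` at that offset would extend past the end of the
    /// buffer.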
    pub fn get_ptr<T>(&self, offset: usize) -> Option<EbpfPtr<'a, T>> {
        if offset + std::mem::size_of::<T>() <= self.size {
            Some(unsafe { self.get_ptr_internal(offset) })
        } else {
            None
        }
    }

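    /// Returns a sub-buffer covering `range`, or `None` if the range extends
    /// past the end of the buffer. Panics if the range's start is greater
    /// than its end.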
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Option<Self> {
        let start = match range.start_bound() {
            std::ops::Bound::Included(&start) => start,
            std::ops::Bound::Excluded(&start) => start + 1,
            std::ops::Bound::Unbounded => 0,
        };
        let end = match range.end_bound() {
            std::ops::Bound::Included(&end) => end + 1,
            std::ops::Bound::Excluded(&end) => end,
            std::ops::Bound::Unbounded => self.size,
        };

        assert!(start <= end);
        (end <= self.size).then(|| {
            // SAFETY: `start <= end <= self.size`, so the sub-buffer stays
            // within the memory range this pointer was created for.
            unsafe {
                Self {
                    ptr: self.ptr.byte_offset(start as isize),
                    size: end - start,
                    phantom: PhantomData,
                }
            }
        })
    }

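    /// Copies the contents of the buffer into a `Vec<u8>`, reading it as a
    /// sequence of relaxed atomic `u64` loads.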
    pub fn load(&self) -> Vec<u8> {
        let mut result = Vec::with_capacity(self.size);

        for pos in (0..self.size).step_by(Self::ALIGNMENT) {
            let value: u64 = unsafe { self.get_ptr_internal::<u64>(pos).load_relaxed() };
            result.extend_from_slice(value.as_bytes());
        }

        result
    }

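    /// Stores `data` into the buffer. `data` must be exactly as long as the
    /// buffer.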
    pub fn store(&self, data: &[u8]) {
        assert!(data.len() == self.size);
        self.store_padded(data);
    }

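    /// Stores `data` at the start of the buffer, writing whole `u64` words.
    /// If the length of `data` is not a multiple of 8, the final word is
    /// zero-padded before being written.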
    pub fn store_padded(&self, data: &[u8]) {
        assert!(data.len() <= self.size);

        let tail = data.len() % 8;
        let end = data.len() - tail;
        for pos in (0..end).step_by(Self::ALIGNMENT) {
            let value = u64::read_from_bytes(&data[pos..(pos + 8)]).unwrap();
            unsafe { self.get_ptr_internal::<u64>(pos).store_relaxed(value) };
        }

        if tail > 0 {
            let mut value: u64 = 0;
            value.as_mut_bytes()[..tail].copy_from_slice(&data[(data.len() - tail)..]);
            unsafe { self.get_ptr_internal::<u64>(data.len() - tail).store_relaxed(value) };
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use fuchsia_runtime::vmar_root_self;
    use std::sync::Barrier;
    use std::sync::atomic::{AtomicU32, Ordering};
    use std::thread;

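    // Writers repeatedly store values in which all eight bytes are equal,
    // while readers check that every observed value still has that shape.
    // A torn (non-atomic) access would mix bytes from different stores and
    // trip the asserts.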
    #[test]
    fn test_u64_atomicity() {
        let vmo_size = zx::system_get_page_size() as usize;
        let vmo = zx::Vmo::create(vmo_size as u64).unwrap();
        let addr = vmar_root_self()
            .map(0, &vmo, 0, vmo_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let shared_ptr = unsafe { EbpfPtr::new(addr as *mut u64) };

        const NUM_THREADS: usize = 10;

        let barrier = Barrier::new(NUM_THREADS * 2);

        let finished_writers = AtomicU32::new(0);

        thread::scope(|scope| {
            let mut threads = Vec::new();

            for _ in 0..NUM_THREADS {
                threads.push(scope.spawn(|| {
                    barrier.wait();
                    for _ in 0..1000 {
                        for i in 0..255 {
                            let v = i << 8 | i;
                            let v = v << 16 | v;
                            let v = v << 32 | v;
                            shared_ptr.store_relaxed(v);
                        }
                    }
                    finished_writers.fetch_add(1, Ordering::Relaxed);
                }));

                threads.push(scope.spawn(|| {
                    barrier.wait();
                    loop {
                        for _ in 0..1000 {
                            let v = shared_ptr.load_relaxed();
                            assert!(v >> 32 == v & 0xffff_ffff);
                            assert!((v >> 16) & 0xffff == v & 0xffff);
                            assert!((v >> 8) & 0xff == v & 0xff);
                        }
                        if finished_writers.load(Ordering::Relaxed) == NUM_THREADS as u32 {
                            break;
                        }
                    }
                }));
            }

            for t in threads.into_iter() {
                t.join().expect("failed to join a test thread");
            }
        });

        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        unsafe {
            vmar_root_self().unmap(addr, vmo_size).unwrap()
        };
    }

    #[test]
    fn test_buffer_slice() {
        const SIZE: usize = 32;

        // Allocate as `u64` so the buffer satisfies the 8-byte alignment
        // asserted by `EbpfBufferPtr::new()`.
        let mut buf = [0u64; SIZE / 8];
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let buf_ptr = unsafe { EbpfBufferPtr::new(buf.as_mut_ptr() as *mut u8, SIZE) };

        buf_ptr.slice(8..16).unwrap().store(&[1, 2, 3, 4, 5, 6, 7, 8]);
        assert_eq!(
            buf_ptr.slice(0..24).unwrap().load(),
            [0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
        );

        assert!(buf_ptr.slice(8..40).is_none());
    }

    #[test]
    fn test_buffer_load() {
        const SIZE: usize = 32;

        // Fill a `u64` array with the byte pattern 0, 1, 2, ... so the buffer
        // satisfies the 8-byte alignment asserted by `EbpfBufferPtr::new()`.
        let mut buf = [0u64; SIZE / 8];
        for (i, word) in buf.iter_mut().enumerate() {
            *word = u64::from_ne_bytes(std::array::from_fn(|j| (i * 8 + j) as u8));
        }
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let buf_ptr = unsafe { EbpfBufferPtr::new(buf.as_mut_ptr() as *mut u8, SIZE) };
        let v = buf_ptr.load();
        assert_eq!(v, (0..SIZE).map(|v| v as u8).collect::<Vec<_>>());
    }

    #[test]
    fn test_buffer_store() {
        const SIZE: usize = 32;

        // Allocate as `u64` so the buffer satisfies the 8-byte alignment
        // asserted by `EbpfBufferPtr::new()`.
        let mut buf = [0u64; SIZE / 8];
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let buf_ptr = unsafe { EbpfBufferPtr::new(buf.as_mut_ptr() as *mut u8, SIZE) };

        buf_ptr.store(&(0..SIZE).map(|v| v as u8).collect::<Vec<_>>());

        let data = buf_ptr.load();
        assert_eq!(&data, &(0..SIZE).map(|v| v as u8).collect::<Vec<_>>());
    }
}