1use derivative::Derivative;
6use std::marker::PhantomData;
7use std::ops::RangeBounds;
8use zerocopy::{FromBytes, IntoBytes};
9
// Architecture-specific backends providing the load/store primitives
// (`load_u64`/`store_u64`/`load_u32`/`store_u32`) called by the
// `load_relaxed`/`store_relaxed` methods below. Exactly one of these modules
// is compiled in for the current target and aliased as `arch`.
#[cfg(target_arch = "aarch64")]
mod arm64;

#[cfg(target_arch = "aarch64")]
use arm64 as arch;

#[cfg(target_arch = "x86_64")]
mod x64;

#[cfg(target_arch = "x86_64")]
use x64 as arch;

#[cfg(target_arch = "riscv64")]
mod riscv64;

#[cfg(target_arch = "riscv64")]
use riscv64 as arch;
27
/// A raw `*mut T` tagged with the lifetime `'a` of the memory it points into.
/// Construction (`new`) and dereferencing (`deref`/`deref_mut`) are `unsafe`;
/// callers are responsible for the pointer being valid for the whole of `'a`.
///
/// `derivative` is used so that `Copy`/`Clone` are implemented
/// unconditionally (`bound = ""`), i.e. without requiring `T: Copy`.
#[derive(Derivative)]
#[derivative(Copy(bound = ""), Clone(bound = ""))]
pub struct EbpfPtr<'a, T> {
    // The wrapped raw pointer; validity is guaranteed by the caller of `new`.
    ptr: *mut T,
    // Ties this pointer to the lifetime of the pointed-to memory.
    phantom: PhantomData<&'a T>,
}
37
// SAFETY: `EbpfPtr` is only a wrapper around a raw pointer; every way to
// create or dereference one is an `unsafe` method whose caller must guarantee
// the pointed-to memory stays valid. NOTE(review): soundness of cross-thread
// use additionally presumes callers perform appropriately synchronized
// accesses (e.g. the `*_relaxed` atomic methods) — confirm against the
// contracts of `new`/`deref`/`deref_mut`.
#[allow(clippy::undocumented_unsafe_blocks, reason = "Force documented unsafe blocks in Starnix")]
unsafe impl<'a, T> Send for EbpfPtr<'a, T> {}
#[allow(clippy::undocumented_unsafe_blocks, reason = "Force documented unsafe blocks in Starnix")]
unsafe impl<'a, T> Sync for EbpfPtr<'a, T> {}
42
43impl<'a, T> EbpfPtr<'a, T>
44where
45 T: Sized,
46{
47 pub unsafe fn new(ptr: *mut T) -> Self {
53 Self { ptr, phantom: PhantomData }
54 }
55
56 pub unsafe fn deref(&self) -> &'a T {
60 #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
61 unsafe {
62 &*self.ptr
63 }
64 }
65
66 pub unsafe fn deref_mut(&self) -> &'a mut T {
69 #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
70 unsafe {
71 &mut *self.ptr
72 }
73 }
74
75 pub fn get_field<F, const OFFSET: usize>(&self) -> EbpfPtr<'a, F> {
76 assert!(OFFSET + std::mem::size_of::<F>() <= std::mem::size_of::<T>());
77 let field_ptr = unsafe { self.ptr.byte_offset(OFFSET as isize) } as *mut F;
80 EbpfPtr::<'a, F> { ptr: field_ptr, phantom: PhantomData }
81 }
82
83 pub fn ptr(&self) -> *mut T {
84 self.ptr
85 }
86}
87
88impl<'a, T> From<&'a mut T> for EbpfPtr<'a, T>
89where
90 T: IntoBytes + FromBytes + Sized,
91{
92 fn from(value: &'a mut T) -> Self {
93 let ptr = value.as_mut_bytes().as_mut_ptr() as *mut T;
94 unsafe { Self::new(ptr) }
98 }
99}
100
impl EbpfPtr<'_, u64> {
    /// Loads the pointed-to `u64`.
    pub fn load_relaxed(&self) -> u64 {
        // SAFETY: `new()` requires `self.ptr` to be valid for `'a`.
        // NOTE(review): atomicity/relaxed ordering is presumed to be provided
        // by `arch::load_u64` — confirm in the per-arch module.
        unsafe { arch::load_u64(self.ptr) }
    }

    /// Stores `value` into the pointed-to `u64`.
    pub fn store_relaxed(&self, value: u64) {
        // SAFETY: `new()` requires `self.ptr` to be valid for `'a`.
        // NOTE(review): atomicity/relaxed ordering is presumed to be provided
        // by `arch::store_u64` — confirm in the per-arch module.
        unsafe { arch::store_u64(self.ptr, value) }
    }
}
116
impl EbpfPtr<'_, u32> {
    /// Loads the pointed-to `u32`.
    pub fn load_relaxed(&self) -> u32 {
        // SAFETY: `new()` requires `self.ptr` to be valid for `'a`.
        // NOTE(review): atomicity/relaxed ordering is presumed to be provided
        // by `arch::load_u32` — confirm in the per-arch module.
        unsafe { arch::load_u32(self.ptr) }
    }

    /// Stores `value` into the pointed-to `u32`.
    pub fn store_relaxed(&self, value: u32) {
        // SAFETY: `new()` requires `self.ptr` to be valid for `'a`.
        // NOTE(review): atomicity/relaxed ordering is presumed to be provided
        // by `arch::store_u32` — confirm in the per-arch module.
        unsafe { arch::store_u32(self.ptr, value) }
    }
}
132
133impl EbpfPtr<'_, i32> {
134 pub fn load_relaxed(&self) -> i32 {
137 unsafe { arch::load_u32(self.ptr as *mut u32) as i32 }
139 }
140
141 pub fn store_relaxed(&self, value: i32) {
144 unsafe { arch::store_u32(self.ptr as *mut u32, value as u32) }
146 }
147}
148
/// A pointer to a mutable byte buffer of known size, tied to the lifetime
/// `'a` of the underlying memory. `new()` asserts the base pointer and the
/// size are `ALIGNMENT`-aligned so that whole-word `u64` accesses can be
/// used for `load`/`store`.
#[derive(Copy, Clone)]
pub struct EbpfBufferPtr<'a> {
    // Base address of the buffer; `new()` asserts `ALIGNMENT` alignment.
    ptr: *mut u8,
    // Size of the buffer in bytes; `new()` asserts it is a multiple of
    // `ALIGNMENT` and smaller than `isize::MAX`.
    size: usize,
    // Ties the buffer to the lifetime of the underlying memory.
    phantom: PhantomData<&'a u8>,
}
162
163impl<'a> EbpfBufferPtr<'a> {
164 pub const ALIGNMENT: usize = size_of::<u64>();
165
166 pub unsafe fn new(ptr: *mut u8, size: usize) -> Self {
173 assert!((ptr as usize) % Self::ALIGNMENT == 0);
174 assert!(size % Self::ALIGNMENT == 0);
175 assert!(size < isize::MAX as usize);
176 Self { ptr, size, phantom: PhantomData }
177 }
178
179 pub fn len(&self) -> usize {
181 self.size
182 }
183
184 pub fn raw_ptr(&self) -> *mut u8 {
186 self.ptr
187 }
188
189 unsafe fn get_ptr_internal<T>(&self, offset: usize) -> EbpfPtr<'a, T> {
192 #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
193 unsafe {
194 EbpfPtr::new(self.ptr.byte_offset(offset as isize) as *mut T)
195 }
196 }
197
198 pub fn get_ptr<T>(&self, offset: usize) -> Option<EbpfPtr<'a, T>> {
200 if offset + std::mem::size_of::<T>() <= self.size {
201 Some(unsafe { self.get_ptr_internal(offset) })
203 } else {
204 None
205 }
206 }
207
208 pub fn slice(&self, range: impl RangeBounds<usize>) -> Option<Self> {
211 let start = match range.start_bound() {
212 std::ops::Bound::Included(&start) => start,
213 std::ops::Bound::Excluded(&start) => start + 1,
214 std::ops::Bound::Unbounded => 0,
215 };
216 let end = match range.end_bound() {
217 std::ops::Bound::Included(&end) => end + 1,
218 std::ops::Bound::Excluded(&end) => end,
219 std::ops::Bound::Unbounded => self.size,
220 };
221
222 assert!(start <= end);
223 (end <= self.size).then(|| {
224 unsafe {
228 Self {
229 ptr: self.ptr.byte_offset(start as isize),
230 size: end - start,
231 phantom: PhantomData,
232 }
233 }
234 })
235 }
236
237 pub fn load(&self) -> Vec<u8> {
239 let mut result = Vec::with_capacity(self.size);
240
241 for pos in (0..self.size).step_by(Self::ALIGNMENT) {
242 let value: u64 = unsafe { self.get_ptr_internal::<u64>(pos).load_relaxed() };
244 result.extend_from_slice(value.as_bytes());
245 }
246
247 result
248 }
249
250 pub fn store(&self, data: &[u8]) {
253 assert!(data.len() == self.size);
254 self.store_padded(data);
255 }
256
257 pub fn store_padded(&self, data: &[u8]) {
260 assert!(data.len() <= self.size);
261
262 let tail = data.len() % 8;
263 let end = data.len() - tail;
264 for pos in (0..end).step_by(Self::ALIGNMENT) {
265 let value = u64::read_from_bytes(&data[pos..(pos + 8)]).unwrap();
266 unsafe { self.get_ptr_internal::<u64>(pos).store_relaxed(value) };
268 }
269
270 if tail > 0 {
271 let mut value: u64 = 0;
272 value.as_mut_bytes()[..tail].copy_from_slice(&data[(data.len() - tail)..]);
273 unsafe { self.get_ptr_internal::<u64>(data.len() - tail).store_relaxed(value) };
275 }
276 }
277}
278
#[cfg(test)]
mod test {
    use super::*;
    use fuchsia_runtime::vmar_root_self;
    use std::sync::Barrier;
    use std::sync::atomic::{AtomicU32, Ordering};
    use std::thread;

    /// A byte buffer with guaranteed 8-byte alignment. Plain `[u8; N]`
    /// arrays and `Vec<u8>` only guarantee 1-byte alignment, which could
    /// trip the alignment assertion in `EbpfBufferPtr::new()`.
    #[repr(align(8))]
    struct AlignedBuf<const N: usize>([u8; N]);

    #[test]
    fn test_u64_atomicity() {
        // Map a fresh page that all writer/reader threads share.
        let vmo_size = zx::system_get_page_size() as usize;
        let vmo = zx::Vmo::create(vmo_size as u64).unwrap();
        let addr = vmar_root_self()
            .map(0, &vmo, 0, vmo_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();
        // SAFETY: `addr` is the base of a page-aligned, readable and
        // writable mapping that stays mapped until the explicit `unmap`
        // below, after all threads have joined.
        let shared_ptr = unsafe { EbpfPtr::new(addr as *mut u64) };

        const NUM_THREADS: usize = 10;

        // One writer plus one reader thread per loop iteration below.
        let barrier = Barrier::new(NUM_THREADS * 2);

        let finished_writers = AtomicU32::new(0);

        thread::scope(|scope| {
            let mut threads = Vec::new();

            // Use `NUM_THREADS` (previously a hard-coded `10`) so the
            // barrier count above cannot get out of sync with this loop.
            for _ in 0..NUM_THREADS {
                threads.push(scope.spawn(|| {
                    barrier.wait();
                    for _ in 0..1000 {
                        for i in 0..255 {
                            // Build a value whose repeated byte/half-word
                            // pattern makes any torn write detectable.
                            let v = i << 8 | i;
                            let v = v << 16 | v;
                            let v = v << 32 | v;
                            shared_ptr.store_relaxed(v);
                        }
                    }
                    finished_writers.fetch_add(1, Ordering::Relaxed);
                }));

                threads.push(scope.spawn(|| {
                    barrier.wait();
                    loop {
                        for _ in 0..1000 {
                            // A non-atomic (torn) store would break the
                            // repeated-pattern invariants checked here.
                            let v = shared_ptr.load_relaxed();
                            assert!(v >> 32 == v & 0xffff_ffff);
                            assert!((v >> 16) & 0xffff == v & 0xffff);
                            assert!((v >> 8) & 0xff == v & 0xff);
                        }
                        if finished_writers.load(Ordering::Relaxed) == NUM_THREADS as u32 {
                            break;
                        }
                    }
                }));
            }

            for t in threads.into_iter() {
                t.join().expect("failed to join a test thread");
            }
        });

        // SAFETY: All threads using the mapping have joined; `shared_ptr`
        // is not used past this point.
        unsafe {
            vmar_root_self().unmap(addr, vmo_size).unwrap()
        };
    }

    #[test]
    fn test_buffer_slice() {
        const SIZE: usize = 32;

        let mut buf = AlignedBuf([0; SIZE]);
        // SAFETY: `buf` is 8-byte aligned, `SIZE` bytes long, and outlives
        // `buf_ptr`.
        let buf_ptr = unsafe { EbpfBufferPtr::new(buf.0.as_mut_ptr(), SIZE) };

        buf_ptr.slice(8..16).unwrap().store(&[1, 2, 3, 4, 5, 6, 7, 8]);
        assert_eq!(
            buf_ptr.slice(0..24).unwrap().load(),
            [0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
        );

        // Slices reaching past the end of the buffer must be rejected.
        assert!(buf_ptr.slice(8..40).is_none());
    }

    #[test]
    fn test_buffer_load() {
        const SIZE: usize = 32;

        // Fill the buffer with 0, 1, 2, ...
        let mut buf: AlignedBuf<SIZE> = AlignedBuf(std::array::from_fn(|i| i as u8));
        // SAFETY: `buf` is 8-byte aligned, `SIZE` bytes long, and outlives
        // `buf_ptr`.
        let buf_ptr = unsafe { EbpfBufferPtr::new(buf.0.as_mut_ptr(), SIZE) };
        let v = buf_ptr.load();
        assert_eq!(v, (0..SIZE).map(|v| v as u8).collect::<Vec<_>>());
    }

    #[test]
    fn test_buffer_store() {
        const SIZE: usize = 32;

        let mut buf = AlignedBuf([0u8; SIZE]);
        // SAFETY: `buf` is 8-byte aligned, `SIZE` bytes long, and outlives
        // `buf_ptr`.
        let buf_ptr = unsafe { EbpfBufferPtr::new(buf.0.as_mut_ptr(), SIZE) };

        buf_ptr.store(&(0..SIZE).map(|v| v as u8).collect::<Vec<_>>());

        let data = buf_ptr.load();
        assert_eq!(&data, &(0..SIZE).map(|v| v as u8).collect::<Vec<_>>());
    }
}