// Copyright 2025 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use derivative::Derivative;
use std::marker::PhantomData;
use std::ops::RangeBounds;
use zerocopy::{FromBytes, IntoBytes};

#[cfg(target_arch = "aarch64")]
mod arm64;

#[cfg(target_arch = "aarch64")]
use arm64 as arch;

#[cfg(target_arch = "x86_64")]
mod x64;

#[cfg(target_arch = "x86_64")]
use x64 as arch;

#[cfg(target_arch = "riscv64")]
mod riscv64;

#[cfg(target_arch = "riscv64")]
use riscv64 as arch;

/// Pointer to a buffer that may be shared between eBPF programs. It allows
/// safely passing around pointers to the data stored in eBPF maps and
/// accessing the data referenced by the pointer.
///
/// `Copy`/`Clone` are implemented via `derivative` with empty bounds so the
/// pointer is copyable regardless of whether `T` itself is `Copy`/`Clone`.
#[derive(Derivative)]
#[derivative(Copy(bound = ""), Clone(bound = ""))]
pub struct EbpfPtr<'a, T> {
    // Raw pointer to the shared value; validity for 'a is a contract of `new()`.
    ptr: *mut T,
    // Ties the pointer's validity to the lifetime 'a without owning a T.
    phantom: PhantomData<&'a T>,
}

// NOTE(review): `Send`/`Sync` are implemented unconditionally for every `T`.
// This appears sound only because all access to the pointee goes through
// `unsafe` methods whose callers must guarantee synchronization — confirm.
#[allow(clippy::undocumented_unsafe_blocks, reason = "Force documented unsafe blocks in Starnix")]
unsafe impl<'a, T> Send for EbpfPtr<'a, T> {}
#[allow(clippy::undocumented_unsafe_blocks, reason = "Force documented unsafe blocks in Starnix")]
unsafe impl<'a, T> Sync for EbpfPtr<'a, T> {}

43impl<'a, T> EbpfPtr<'a, T>
44where
45    T: Sized,
46{
47    /// Creates a new `EbpfPtr` from the specified pointer.
48    ///
49    /// # Safety
50    /// Caller must ensure that the buffer referenced by `ptr` is valid for
51    /// lifetime `'a` and there are no other mutable references to the same memory.
52    pub unsafe fn new(ptr: *mut T) -> Self {
53        Self { ptr, phantom: PhantomData }
54    }
55
56    /// # Safety
57    /// Caller must ensure that the value cannot be updated by other threads
58    /// while the returned reference is live.
59    pub unsafe fn deref(&self) -> &'a T {
60        #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
61        unsafe {
62            &*self.ptr
63        }
64    }
65
66    /// # Safety
67    /// Caller must ensure that the value is not being used by other threads.
68    pub unsafe fn deref_mut(&self) -> &'a mut T {
69        #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
70        unsafe {
71            &mut *self.ptr
72        }
73    }
74
75    pub fn get_field<F, const OFFSET: usize>(&self) -> EbpfPtr<'a, F> {
76        assert!(OFFSET + std::mem::size_of::<F>() <= std::mem::size_of::<T>());
77        // SAFETY: offset is guaranteed to be within the bounds of the struct,
78        // see the assert above.
79        let field_ptr = unsafe { self.ptr.byte_offset(OFFSET as isize) } as *mut F;
80        EbpfPtr::<'a, F> { ptr: field_ptr, phantom: PhantomData }
81    }
82
83    pub fn ptr(&self) -> *mut T {
84        self.ptr
85    }
86}
87
88impl<'a, T> From<&'a mut T> for EbpfPtr<'a, T>
89where
90    T: IntoBytes + FromBytes + Sized,
91{
92    fn from(value: &'a mut T) -> Self {
93        let ptr = value.as_mut_bytes().as_mut_ptr() as *mut T;
94        // SAFETY: We borrow a mutable reference to T for the lifetime 'a.
95        // This guarantees that the returned pointer is valid for the lifetime
96        // 'a and there are no other mutable references.
97        unsafe { Self::new(ptr) }
98    }
99}
100
101impl EbpfPtr<'_, u64> {
102    /// Loads the value referenced by the pointer. Atomicity is guaranteed
103    /// if and only if the pointer is 8-byte aligned.
104    pub fn load_relaxed(&self) -> u64 {
105        // SAFETY: Atomic load of the value referenced by the pointer.
106        unsafe { arch::load_u64(self.ptr) }
107    }
108
109    /// Stores the `value` at the memory referenced by the pointer. Atomicity
110    /// is guaranteed if and only if the pointer is 8-byte aligned.
111    pub fn store_relaxed(&self, value: u64) {
112        // SAFETY: Atomic store of the value referenced by the pointer.
113        unsafe { arch::store_u64(self.ptr, value) }
114    }
115}
116
117impl EbpfPtr<'_, u32> {
118    /// Loads the value referenced by the pointer. Atomicity is guaranteed
119    /// if and only if the pointer is 4-byte aligned.
120    pub fn load_relaxed(&self) -> u32 {
121        // SAFETY: Atomic load of the value referenced by the pointer.
122        unsafe { arch::load_u32(self.ptr) }
123    }
124
125    /// Stores the `value` at the memory referenced by the pointer. Atomicity
126    /// is guaranteed if and only if the pointer is 4-byte aligned.
127    pub fn store_relaxed(&self, value: u32) {
128        // SAFETY: Atomic store of the value referenced by the pointer.
129        unsafe { arch::store_u32(self.ptr, value) }
130    }
131}
132
133impl EbpfPtr<'_, i32> {
134    /// Loads the value referenced by the pointer. Atomicity is guaranteed
135    /// if and only if the pointer is 4-byte aligned.
136    pub fn load_relaxed(&self) -> i32 {
137        // SAFETY: Atomic load of the value referenced by the pointer.
138        unsafe { arch::load_u32(self.ptr as *mut u32) as i32 }
139    }
140
141    /// Stores the `value` at the memory referenced by the pointer. Atomicity
142    /// is guaranteed if and only if the pointer is 4-byte aligned.
143    pub fn store_relaxed(&self, value: i32) {
144        // SAFETY: Atomic store of the value referenced by the pointer.
145        unsafe { arch::store_u32(self.ptr as *mut u32, value as u32) }
146    }
147}
148
/// Wraps a pointer to a buffer used in the eBPF runtime, such as an eBPF map
/// entry. The referenced data may be accessed from multiple threads in
/// parallel, which makes it unsafe to access it using standard Rust types.
/// `EbpfBufferPtr` allows accessing these buffers safely. It may be used to
/// reference either a whole VMO allocated for an eBPF map or individual
/// elements of that VMO (see `slice()`). The address and the size of the
/// buffer are always 8-byte aligned.
#[derive(Copy, Clone)]
pub struct EbpfBufferPtr<'a> {
    // Start of the buffer; 8-byte aligned (checked in `new()`).
    ptr: *mut u8,
    // Size of the buffer in bytes; a multiple of 8 (checked in `new()`).
    size: usize,
    // Ties the pointer's validity to the lifetime 'a.
    phantom: PhantomData<&'a u8>,
}

163impl<'a> EbpfBufferPtr<'a> {
164    pub const ALIGNMENT: usize = size_of::<u64>();
165
166    /// Creates a new `EbpfBufferPtr` from the specified pointer. `ptr` must be
167    /// 8-byte aligned. `size` must be multiple of 8.
168    ///
169    /// # Safety
170    /// Caller must ensure that the buffer referenced by `ptr` is valid for
171    /// lifetime `'a`.
172    pub unsafe fn new(ptr: *mut u8, size: usize) -> Self {
173        assert!((ptr as usize) % Self::ALIGNMENT == 0);
174        assert!(size % Self::ALIGNMENT == 0);
175        assert!(size < isize::MAX as usize);
176        Self { ptr, size, phantom: PhantomData }
177    }
178
179    /// Size of the buffer in bytes.
180    pub fn len(&self) -> usize {
181        self.size
182    }
183
184    /// Raw pointer to the start of the buffer.
185    pub fn raw_ptr(&self) -> *mut u8 {
186        self.ptr
187    }
188
189    // SAFETY: caller must ensure that the value at the specified offset fits
190    // the buffer.
191    unsafe fn get_ptr_internal<T>(&self, offset: usize) -> EbpfPtr<'a, T> {
192        #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
193        unsafe {
194            EbpfPtr::new(self.ptr.byte_offset(offset as isize) as *mut T)
195        }
196    }
197
198    /// Returns a pointer to a value of type `T` at the specified `offset`.
199    pub fn get_ptr<T>(&self, offset: usize) -> Option<EbpfPtr<'a, T>> {
200        if offset + std::mem::size_of::<T>() <= self.size {
201            // SAFETY: Buffer bounds are verified above.
202            Some(unsafe { self.get_ptr_internal(offset) })
203        } else {
204            None
205        }
206    }
207
208    /// Returns pointer to the specified range in the buffer.
209    /// Range bounds must be multiple of 8.
210    pub fn slice(&self, range: impl RangeBounds<usize>) -> Option<Self> {
211        let start = match range.start_bound() {
212            std::ops::Bound::Included(&start) => start,
213            std::ops::Bound::Excluded(&start) => start + 1,
214            std::ops::Bound::Unbounded => 0,
215        };
216        let end = match range.end_bound() {
217            std::ops::Bound::Included(&end) => end + 1,
218            std::ops::Bound::Excluded(&end) => end,
219            std::ops::Bound::Unbounded => self.size,
220        };
221
222        assert!(start <= end);
223        (end <= self.size).then(|| {
224            // SAFETY: Returned buffer has the same lifetime as `self`, which
225            // ensures that the `ptr` stays valid for the lifetime of the
226            // result.
227            unsafe {
228                Self {
229                    ptr: self.ptr.byte_offset(start as isize),
230                    size: end - start,
231                    phantom: PhantomData,
232                }
233            }
234        })
235    }
236
237    /// Loads contents of the buffer to a `Vec<u8>`.
238    pub fn load(&self) -> Vec<u8> {
239        let mut result = Vec::with_capacity(self.size);
240
241        for pos in (0..self.size).step_by(Self::ALIGNMENT) {
242            // SAFETY: the offset is guaranteed to be within the buffer bounds.
243            let value: u64 = unsafe { self.get_ptr_internal::<u64>(pos).load_relaxed() };
244            result.extend_from_slice(value.as_bytes());
245        }
246
247        result
248    }
249
250    /// Stores `data` in the buffer. `data` must be the same size as the
251    /// buffer.
252    pub fn store(&self, data: &[u8]) {
253        assert!(data.len() == self.size);
254        self.store_padded(data);
255    }
256
257    /// Stores `data` at the head of the buffer. If `data` is not multiple of 8
258    /// then it's padded at the end with zeros.
259    pub fn store_padded(&self, data: &[u8]) {
260        assert!(data.len() <= self.size);
261
262        let tail = data.len() % 8;
263        let end = data.len() - tail;
264        for pos in (0..end).step_by(Self::ALIGNMENT) {
265            let value = u64::read_from_bytes(&data[pos..(pos + 8)]).unwrap();
266            // SAFETY: pos is guaranteed to be within the buffer bounds.
267            unsafe { self.get_ptr_internal::<u64>(pos).store_relaxed(value) };
268        }
269
270        if tail > 0 {
271            let mut value: u64 = 0;
272            value.as_mut_bytes()[..tail].copy_from_slice(&data[(data.len() - tail)..]);
273            // SAFETY: pos is guaranteed to be within the buffer bounds.
274            unsafe { self.get_ptr_internal::<u64>(data.len() - tail).store_relaxed(value) };
275        }
276    }
277}
278
#[cfg(test)]
mod test {
    use super::*;
    use fuchsia_runtime::vmar_root_self;
    use std::sync::Barrier;
    use std::sync::atomic::{AtomicU32, Ordering};
    use std::thread;

    // Spawns pairs of writer/reader threads hammering one shared u64 and
    // verifies that readers never observe a torn (partially-written) value.
    #[test]
    fn test_u64_atomicity() {
        let vmo_size = zx::system_get_page_size() as usize;
        let vmo = zx::Vmo::create(vmo_size as u64).unwrap();
        let addr = vmar_root_self()
            .map(0, &vmo, 0, vmo_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();
        // SAFETY: the mapping created above stays valid until it's unmapped
        // at the end of the test, after every thread has joined.
        let shared_ptr = unsafe { EbpfPtr::new(addr as *mut u64) };

        const NUM_THREADS: usize = 10;

        // Barrier used to synchronize start of the threads.
        let barrier = Barrier::new(NUM_THREADS * 2);

        let finished_writers = AtomicU32::new(0);

        thread::scope(|scope| {
            let mut threads = Vec::new();

            // Iterate over `NUM_THREADS` (not a duplicated literal) so the
            // barrier count above always matches the number of spawned pairs.
            for _ in 0..NUM_THREADS {
                threads.push(scope.spawn(|| {
                    barrier.wait();
                    for _ in 0..1000 {
                        for i in 0..255 {
                            // Store a value with the same value repeated in every byte.
                            let v = i << 8 | i;
                            let v = v << 16 | v;
                            let v = v << 32 | v;
                            shared_ptr.store_relaxed(v);
                        }
                    }
                    finished_writers.fetch_add(1, Ordering::Relaxed);
                }));

                threads.push(scope.spawn(|| {
                    barrier.wait();
                    loop {
                        for _ in 0..1000 {
                            let v = shared_ptr.load_relaxed();
                            // Verify that all bytes in `v` are set to the same
                            // value, i.e. the load was not torn.
                            assert!(v >> 32 == v & 0xffff_ffff);
                            assert!((v >> 16) & 0xffff == v & 0xffff);
                            assert!((v >> 8) & 0xff == v & 0xff);
                        }
                        if finished_writers.load(Ordering::Relaxed) == NUM_THREADS as u32 {
                            break;
                        }
                    }
                }));
            }

            for t in threads.into_iter() {
                t.join().expect("failed to join a test thread");
            }
        });

        // SAFETY: the mapping is no longer referenced; every thread that used
        // `shared_ptr` has joined.
        unsafe {
            vmar_root_self().unmap(addr, vmo_size).unwrap()
        };
    }

    #[test]
    fn test_buffer_slice() {
        const SIZE: usize = 32;

        let mut buf = [0; SIZE];
        // SAFETY: `buf` is a local array that outlives `buf_ptr`.
        let buf_ptr = unsafe { EbpfBufferPtr::new(buf.as_mut_ptr(), SIZE) };

        buf_ptr.slice(8..16).unwrap().store(&[1, 2, 3, 4, 5, 6, 7, 8]);
        assert_eq!(
            buf_ptr.slice(0..24).unwrap().load(),
            [0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
        );

        // Out-of-bounds slices are rejected.
        assert!(buf_ptr.slice(8..40).is_none());
    }

    #[test]
    fn test_buffer_load() {
        const SIZE: usize = 32;

        let mut buf = (0..SIZE as u8).collect::<Vec<_>>();
        // SAFETY: `buf` is a local vector that outlives `buf_ptr`.
        let buf_ptr = unsafe { EbpfBufferPtr::new(buf.as_mut_ptr(), SIZE) };
        let v = buf_ptr.load();
        assert_eq!(v, (0..SIZE).map(|v| v as u8).collect::<Vec<_>>());
    }

    #[test]
    fn test_buffer_store() {
        const SIZE: usize = 32;

        let mut buf = [0u8; SIZE];
        // SAFETY: `buf` is a local array that outlives `buf_ptr`.
        let buf_ptr = unsafe { EbpfBufferPtr::new(buf.as_mut_ptr(), SIZE) };

        // Write values from `s` to `e` to range `s..e`.
        buf_ptr.store(&(0..SIZE).map(|v| v as u8).collect::<Vec<_>>());

        // Read the content and verify that it matches the expectation.
        let data = buf_ptr.load();
        assert_eq!(&data, &(0..SIZE).map(|v| v as u8).collect::<Vec<_>>());
    }
}