ebpf/memio/mod.rs

// Copyright 2025 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::marker::PhantomData;
use std::ops::RangeBounds;
use zerocopy::{FromBytes, IntoBytes};

#[cfg(target_arch = "aarch64")]
mod arm64;

#[cfg(target_arch = "aarch64")]
use arm64 as arch;

#[cfg(target_arch = "x86_64")]
mod x64;

#[cfg(target_arch = "x86_64")]
use x64 as arch;

#[cfg(target_arch = "riscv64")]
mod riscv64;

#[cfg(target_arch = "riscv64")]
use riscv64 as arch;

/// Pointer to a buffer that may be shared between eBPF programs. It makes it
/// possible to safely pass around pointers to data stored in eBPF maps and to
/// access the data they reference.
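///
/// # Example
///
/// A minimal usage sketch (not compiled as a doctest); the local `u64` below
/// is only a stand-in for a real, suitably long-lived eBPF map entry:
///
/// ```ignore
/// let mut value: u64 = 0;
/// // SAFETY: `value` outlives `ptr`, so the buffer stays valid for `ptr`'s lifetime.
/// let ptr = unsafe { EbpfPtr::new(&mut value as *mut u64) };
/// ptr.store_relaxed(42);
/// assert_eq!(ptr.load_relaxed(), 42);
/// ```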
pub struct EbpfPtr<'a, T> {
    ptr: *mut T,
    phantom: PhantomData<&'a T>,
}

#[allow(clippy::undocumented_unsafe_blocks, reason = "Force documented unsafe blocks in Starnix")]
unsafe impl<'a, T> Send for EbpfPtr<'a, T> {}
#[allow(clippy::undocumented_unsafe_blocks, reason = "Force documented unsafe blocks in Starnix")]
unsafe impl<'a, T> Sync for EbpfPtr<'a, T> {}

impl<'a, T> EbpfPtr<'a, T> {
    /// Creates a new `EbpfPtr` from the specified pointer.
    ///
    /// # Safety
    /// Caller must ensure that the buffer referenced by `ptr` is valid for
    /// lifetime `'a`.
    pub unsafe fn new(ptr: *mut T) -> Self {
        Self { ptr, phantom: PhantomData }
    }

    /// # Safety
    /// Caller must ensure that the value cannot be updated by other threads
    /// while the returned reference is live.
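    ///
    /// A sketch of the intended call pattern (hypothetical `ptr: EbpfPtr<'_, u64>`):
    ///
    /// ```ignore
    /// // SAFETY: no other thread updates the value while `value_ref` is live.
    /// let value_ref: &u64 = unsafe { ptr.deref() };
    /// ```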
    pub unsafe fn deref(&self) -> &'a T {
        #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
        unsafe {
            &*self.ptr
        }
    }

    /// # Safety
    /// Caller must ensure that the value is not being used by other threads.
    pub unsafe fn deref_mut(&self) -> &'a mut T {
        #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
        unsafe {
            &mut *self.ptr
        }
    }
}

impl EbpfPtr<'_, u64> {
    /// Loads the value referenced by the pointer. Atomicity is guaranteed
    /// if and only if the pointer is 8-byte aligned.
    pub fn load_relaxed(&self) -> u64 {
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        unsafe {
            arch::load_u64(self.ptr)
        }
    }

    /// Stores the `value` at the memory referenced by the pointer. Atomicity
    /// is guaranteed if and only if the pointer is 8-byte aligned.
    pub fn store_relaxed(&self, value: u64) {
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        unsafe {
            arch::store_u64(self.ptr, value)
        }
    }
}

impl EbpfPtr<'_, u32> {
    /// Loads the value referenced by the pointer. Atomicity is guaranteed
    /// if and only if the pointer is 4-byte aligned.
    pub fn load_relaxed(&self) -> u32 {
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        unsafe {
            arch::load_u32(self.ptr)
        }
    }

    /// Stores the `value` at the memory referenced by the pointer. Atomicity
    /// is guaranteed if and only if the pointer is 4-byte aligned.
    pub fn store_relaxed(&self, value: u32) {
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        unsafe {
            arch::store_u32(self.ptr, value)
        }
    }
}

/// Wraps a pointer to a buffer used in the eBPF runtime, such as an eBPF map
/// entry. The referenced data may be accessed from multiple threads in
/// parallel, which makes it unsafe to access it using standard Rust types.
/// `EbpfBufferPtr` allows these buffers to be accessed safely. It may be used
/// to reference either a whole VMO allocated for an eBPF map or individual
/// elements of that VMO (see `slice()`). The address and the size of the
/// buffer are always 8-byte aligned.
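///
/// # Example
///
/// A minimal usage sketch (not compiled as a doctest); the `u64` array is only
/// a stand-in for a mapped VMO and exists to provide 8-byte-aligned storage:
///
/// ```ignore
/// let mut storage = [0u64; 4];
/// // SAFETY: `storage` outlives `buf`, so the buffer stays valid for `buf`'s lifetime.
/// let buf = unsafe { EbpfBufferPtr::new(storage.as_mut_ptr() as *mut u8, 32) };
/// buf.store(&[1u8; 32]);
/// assert_eq!(buf.load(), vec![1u8; 32]);
/// ```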
#[derive(Copy, Clone)]
pub struct EbpfBufferPtr<'a> {
    ptr: *mut u8,
    size: usize,
    phantom: PhantomData<&'a u8>,
}

impl<'a> EbpfBufferPtr<'a> {
    pub const ALIGNMENT: usize = size_of::<u64>();

    /// Creates a new `EbpfBufferPtr` from the specified pointer. `ptr` must be
    /// 8-byte aligned. `size` must be a multiple of 8.
    ///
    /// # Safety
    /// Caller must ensure that the buffer referenced by `ptr` is valid for
    /// lifetime `'a`.
    pub unsafe fn new(ptr: *mut u8, size: usize) -> Self {
        assert!((ptr as usize) % Self::ALIGNMENT == 0);
        assert!(size % Self::ALIGNMENT == 0);
        assert!(size < isize::MAX as usize);
        Self { ptr, size, phantom: PhantomData }
    }

    /// Size of the buffer in bytes.
    pub fn len(&self) -> usize {
        self.size
    }

    /// Raw pointer to the start of the buffer.
    pub fn raw_ptr(&self) -> *mut u8 {
        self.ptr
    }

    // SAFETY: caller must ensure that the value at the specified offset fits
    // the buffer.
    unsafe fn get_ptr_internal<T>(&self, offset: usize) -> EbpfPtr<'a, T> {
        #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
        unsafe {
            EbpfPtr::new(self.ptr.byte_offset(offset as isize) as *mut T)
        }
    }

    /// Returns a pointer to a value of type `T` at the specified `offset`, or
    /// `None` if the value does not fit within the buffer.
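    ///
    /// A sketch of typical use (hypothetical `buf: EbpfBufferPtr<'_>` holding a
    /// counter in its first word):
    ///
    /// ```ignore
    /// if let Some(counter) = buf.get_ptr::<u64>(0) {
    ///     counter.store_relaxed(counter.load_relaxed() + 1);
    /// }
    /// ```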
    pub fn get_ptr<T>(&self, offset: usize) -> Option<EbpfPtr<'a, T>> {
        if offset + std::mem::size_of::<T>() <= self.size {
            // SAFETY: Buffer bounds are verified above.
            Some(unsafe { self.get_ptr_internal(offset) })
        } else {
            None
        }
    }

    /// Returns a pointer to the specified range in the buffer, or `None` if the
    /// range extends past the end of the buffer. Range bounds must be multiples
    /// of 8.
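    ///
    /// For example, given a 32-byte buffer, `buf.slice(8..16)` yields an 8-byte
    /// sub-buffer sharing the same backing memory (a sketch, assuming a
    /// hypothetical `buf: EbpfBufferPtr<'_>`):
    ///
    /// ```ignore
    /// let entry = buf.slice(8..16).expect("range is within the buffer");
    /// assert_eq!(entry.len(), 8);
    /// ```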
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Option<Self> {
        let start = match range.start_bound() {
            std::ops::Bound::Included(&start) => start,
            std::ops::Bound::Excluded(&start) => start + 1,
            std::ops::Bound::Unbounded => 0,
        };
        let end = match range.end_bound() {
            std::ops::Bound::Included(&end) => end + 1,
            std::ops::Bound::Excluded(&end) => end,
            std::ops::Bound::Unbounded => self.size,
        };

        assert!(start <= end);
        (end <= self.size).then(|| {
            // SAFETY: Returned buffer has the same lifetime as `self`, which
            // ensures that the `ptr` stays valid for the lifetime of the
            // result.
            unsafe {
                Self {
                    ptr: self.ptr.byte_offset(start as isize),
                    size: end - start,
                    phantom: PhantomData,
                }
            }
        })
    }

    /// Loads the contents of the buffer into a `Vec<u8>`.
    pub fn load(&self) -> Vec<u8> {
        let mut result = Vec::with_capacity(self.size);

        for pos in (0..self.size).step_by(Self::ALIGNMENT) {
            // SAFETY: the offset is guaranteed to be within the buffer bounds.
            let value: u64 = unsafe { self.get_ptr_internal::<u64>(pos).load_relaxed() };
            result.extend_from_slice(value.as_bytes());
        }

        result
    }

    /// Stores `data` in the buffer. `data` must be the same size as the
    /// buffer.
    pub fn store(&self, data: &[u8]) {
        assert!(data.len() == self.size);
        self.store_padded(data);
    }

    /// Stores `data` at the head of the buffer. If the length of `data` is not
    /// a multiple of 8, then it is padded at the end with zeros.
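    ///
    /// A sketch of the padding behavior (hypothetical `buf: EbpfBufferPtr<'_>`
    /// of at least 8 bytes): storing 3 bytes writes one full word in which the
    /// remaining 5 bytes are zeroed.
    ///
    /// ```ignore
    /// buf.store_padded(&[1, 2, 3]);
    /// assert_eq!(&buf.load()[..8], &[1, 2, 3, 0, 0, 0, 0, 0]);
    /// ```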
    pub fn store_padded(&self, data: &[u8]) {
        assert!(data.len() <= self.size);

        let tail = data.len() % 8;
        let end = data.len() - tail;
        for pos in (0..end).step_by(Self::ALIGNMENT) {
            let value = u64::read_from_bytes(&data[pos..(pos + 8)]).unwrap();
            // SAFETY: pos is guaranteed to be within the buffer bounds.
            unsafe { self.get_ptr_internal::<u64>(pos).store_relaxed(value) };
        }

        if tail > 0 {
            let mut value: u64 = 0;
            value.as_mut_bytes()[..tail].copy_from_slice(&data[(data.len() - tail)..]);
            // SAFETY: the tail offset is guaranteed to be within the buffer bounds.
            unsafe { self.get_ptr_internal::<u64>(data.len() - tail).store_relaxed(value) };
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use fuchsia_runtime::vmar_root_self;
    use std::sync::Barrier;
    use std::sync::atomic::{AtomicU32, Ordering};
    use std::thread;

    #[test]
    fn test_u64_atomicity() {
        let vmo_size = zx::system_get_page_size() as usize;
        let vmo = zx::Vmo::create(vmo_size as u64).unwrap();
        let addr = vmar_root_self()
            .map(0, &vmo, 0, vmo_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let shared_ptr = unsafe { EbpfPtr::new(addr as *mut u64) };

        const NUM_THREADS: usize = 10;

        // Barrier used to synchronize start of the threads.
        let barrier = Barrier::new(NUM_THREADS * 2);

        let finished_writers = AtomicU32::new(0);

        thread::scope(|scope| {
            let mut threads = Vec::new();

            for _ in 0..NUM_THREADS {
                threads.push(scope.spawn(|| {
                    barrier.wait();
                    for _ in 0..1000 {
                        for i in 0..255 {
                            // Store a value with the same value repeated in every byte.
                            let v = i << 8 | i;
                            let v = v << 16 | v;
                            let v = v << 32 | v;
                            shared_ptr.store_relaxed(v);
                        }
                    }
                    finished_writers.fetch_add(1, Ordering::Relaxed);
                }));

                threads.push(scope.spawn(|| {
                    barrier.wait();
                    loop {
                        for _ in 0..1000 {
                            let v = shared_ptr.load_relaxed();
                            // Verify that all bytes in `v` are set to the same value.
                            assert!(v >> 32 == v & 0xffff_ffff);
                            assert!((v >> 16) & 0xffff == v & 0xffff);
                            assert!((v >> 8) & 0xff == v & 0xff);
                        }
                        if finished_writers.load(Ordering::Relaxed) == NUM_THREADS as u32 {
                            break;
                        }
                    }
                }));
            }

            for t in threads.into_iter() {
                t.join().expect("failed to join a test thread");
            }
        });

        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        unsafe {
            vmar_root_self().unmap(addr, vmo_size).unwrap()
        };
    }

    #[test]
    fn test_buffer_slice() {
        const SIZE: usize = 32;

        // Use a `u64` array as backing storage so the buffer is guaranteed to be
        // 8-byte aligned, as required by `EbpfBufferPtr::new()`.
        let mut buf = [0u64; SIZE / 8];
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let buf_ptr = unsafe { EbpfBufferPtr::new(buf.as_mut_ptr() as *mut u8, SIZE) };

        buf_ptr.slice(8..16).unwrap().store(&[1, 2, 3, 4, 5, 6, 7, 8]);
        assert_eq!(
            buf_ptr.slice(0..24).unwrap().load(),
            [0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
        );

        assert!(buf_ptr.slice(8..40).is_none());
    }

    #[test]
    fn test_buffer_load() {
        const SIZE: usize = 32;

        // Use a `u64` array as backing storage so the buffer is guaranteed to be
        // 8-byte aligned, then fill it with the byte values 0..SIZE.
        let mut buf = [0u64; SIZE / 8];
        buf.as_mut_bytes().copy_from_slice(&(0..SIZE).map(|v| v as u8).collect::<Vec<_>>());
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let buf_ptr = unsafe { EbpfBufferPtr::new(buf.as_mut_ptr() as *mut u8, SIZE) };
        let v = buf_ptr.load();
        assert_eq!(v, (0..SIZE).map(|v| v as u8).collect::<Vec<_>>());
    }

    #[test]
    fn test_buffer_store() {
        const SIZE: usize = 32;

        // Use a `u64` array as backing storage so the buffer is guaranteed to be
        // 8-byte aligned, as required by `EbpfBufferPtr::new()`.
        let mut buf = [0u64; SIZE / 8];
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let buf_ptr = unsafe { EbpfBufferPtr::new(buf.as_mut_ptr() as *mut u8, SIZE) };

        // Fill the whole buffer with the byte values 0..SIZE.
        buf_ptr.store(&(0..SIZE).map(|v| v as u8).collect::<Vec<_>>());

        // Read the content back and verify that it matches the expectation.
        let data = buf_ptr.load();
        assert_eq!(&data, &(0..SIZE).map(|v| v as u8).collect::<Vec<_>>());
    }
}
381}