// Path: starnix_core/bpf/mod.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Implementation of (e)BPF.
//!
//! BPF stands for Berkeley Packet Filter and is an API introduced in BSD that allows filtering
//! network packets by running little programs in the kernel. eBPF stands for extended BPF and
//! is a Linux extension of BPF that allows hooking BPF programs into many different
//! non-networking-related contexts.
11
12pub mod attachments;
13pub mod context;
14pub mod fs;
15pub mod program;
16pub mod syscalls;
17
18use crate::bpf::attachments::EbpfAttachments;
19use crate::bpf::program::{ProgramHandle, ProgramId, WeakProgramHandle};
20use crate::mm::memory::MemoryObject;
21use crate::security;
22use crate::task::{CurrentTask, CurrentTaskAndLocked, Kernel, register_delayed_release};
23use ebpf_api::PinnedMap;
24use starnix_lifecycle::{ObjectReleaser, ReleaserAction};
25use starnix_sync::{
26    EbpfMapStateLevel, EbpfStateLock, LockBefore, Locked, MutexGuard, OrderedMutex,
27};
28use starnix_types::ownership::{Releasable, ReleaseGuard};
29use starnix_uapi::error;
30use starnix_uapi::errors::Errno;
31use std::collections::BTreeMap;
32use std::ops::{Bound, Deref};
33use std::sync::atomic::{AtomicU32, Ordering};
34use std::sync::{Arc, Weak};
35
/// Identifier for a BPF map.
type BpfMapId = u32;

/// Counter backing map-identifier allocation. Starts at 1, so the first
/// allocated id is 1 and 0 is never handed out.
static MAP_IDS: AtomicU32 = AtomicU32::new(1);

/// Allocates the next map identifier.
fn new_map_id() -> BpfMapId {
    // `Relaxed` is sufficient here: only uniqueness of the returned value is
    // needed, not any ordering with other memory operations.
    MAP_IDS.fetch_add(1, Ordering::Relaxed)
}
43
/// Mutable state of a [`BpfMap`], guarded by the map's `state` lock.
#[derive(Debug, Default)]
struct BpfMapState {
    // Cached memory object for the map, created on demand by
    // `BpfMap::get_memory` and cleared by a successful `BpfMap::freeze`.
    memory_object: Option<Arc<MemoryObject>>,
    // Whether the map has been frozen (set by `BpfMap::freeze`).
    is_frozen: bool,
}
49
/// A BPF map and Starnix-specific metadata.
#[derive(Debug)]
pub struct BpfMap {
    // Identifier allocated from the global `MAP_IDS` counter; used as the key
    // in the kernel's map registry.
    id: BpfMapId,
    // The underlying map implementation; exposed to callers via `Deref`.
    map: PinnedMap,

    /// The internal state of the map object.
    state: OrderedMutex<BpfMapState, EbpfMapStateLevel>,

    /// The security state associated with this bpf Map.
    pub security_state: security::BpfMapState,

    /// Reference to the `Kernel`. Used to unregister `self` on drop.
    kernel: Weak<Kernel>,
}
65
66impl Deref for BpfMap {
67    type Target = PinnedMap;
68    fn deref(&self) -> &PinnedMap {
69        &self.map
70    }
71}
72
73impl BpfMap {
74    pub fn new<L>(
75        locked: &mut Locked<L>,
76        current_task: &CurrentTask,
77        map: PinnedMap,
78        security_state: security::BpfMapState,
79    ) -> BpfMapHandle
80    where
81        L: LockBefore<EbpfStateLock>,
82    {
83        let map = BpfMapHandle::new(
84            Self {
85                id: new_map_id(),
86                map,
87                state: Default::default(),
88                security_state,
89                kernel: Arc::downgrade(current_task.kernel()),
90            }
91            .into(),
92        );
93        current_task.kernel().ebpf_state.register_map(locked, &map);
94        map
95    }
96
97    pub fn id(&self) -> BpfMapId {
98        self.id
99    }
100
101    fn frozen<'a, L>(
102        &'a self,
103        locked: &'a mut Locked<L>,
104    ) -> (impl Deref<Target = bool> + 'a, &'a mut Locked<EbpfMapStateLevel>)
105    where
106        L: LockBefore<EbpfMapStateLevel>,
107    {
108        let (guard, locked) = self.state.lock_and(locked);
109        (MutexGuard::map(guard, |s| &mut s.is_frozen), locked)
110    }
111
112    fn freeze<L>(&self, locked: &mut Locked<L>) -> Result<(), Errno>
113    where
114        L: LockBefore<EbpfMapStateLevel>,
115    {
116        let mut state = self.state.lock(locked);
117        if state.is_frozen {
118            return Ok(());
119        }
120        if let Some(memory) = state.memory_object.take() {
121            // The memory has been computed, check whether it is still in use.
122            if let Err(memory) = Arc::try_unwrap(memory) {
123                // There is other user of the memory. freeze must fail.
124                state.memory_object = Some(memory);
125                return error!(EBUSY);
126            }
127        }
128        state.is_frozen = true;
129        return Ok(());
130    }
131
132    fn get_memory<L, F>(
133        &self,
134        locked: &mut Locked<L>,
135        factory: F,
136    ) -> Result<Arc<MemoryObject>, Errno>
137    where
138        L: LockBefore<EbpfMapStateLevel>,
139        F: FnOnce() -> Result<Arc<MemoryObject>, Errno>,
140    {
141        let mut state = self.state.lock(locked);
142        if state.is_frozen {
143            return error!(EPERM);
144        }
145        if let Some(memory) = state.memory_object.as_ref() {
146            return Ok(memory.clone());
147        }
148        let memory = factory()?;
149        state.memory_object = Some(memory.clone());
150        Ok(memory)
151    }
152}
153
154impl Releasable for BpfMap {
155    type Context<'a> = CurrentTaskAndLocked<'a>;
156
157    fn release<'a>(self, (locked, _current_task): CurrentTaskAndLocked<'a>) {
158        if let Some(kernel) = self.kernel.upgrade() {
159            kernel.ebpf_state.unregister_map(locked, self.id);
160        }
161    }
162}
163
/// Uninhabited marker type selecting the release strategy for `BpfMap`.
pub enum BpfMapReleaserAction {}
impl ReleaserAction<BpfMap> for BpfMapReleaserAction {
    // Defers the actual release of the map via the delayed-release machinery
    // rather than releasing it inline.
    fn release(map: ReleaseGuard<BpfMap>) {
        register_delayed_release(map);
    }
}
/// Releaser wrapper that triggers `BpfMapReleaserAction` when dropped.
pub type BpfMapReleaser = ObjectReleaser<BpfMap, BpfMapReleaserAction>;
/// Strong, reference-counted handle to a `BpfMap`.
pub type BpfMapHandle = Arc<BpfMapReleaser>;
/// Weak counterpart of `BpfMapHandle`; does not keep the map alive.
pub type WeakBpfMapHandle = Weak<BpfMapReleaser>;
173
/// Stores global eBPF state.
#[derive(Default)]
pub struct EbpfState {
    /// eBPF attachment state; see [`EbpfAttachments`].
    pub attachments: EbpfAttachments,

    // Registry of programs keyed by id. Weak handles, so the registry does
    // not keep programs alive by itself.
    programs: OrderedMutex<BTreeMap<ProgramId, WeakProgramHandle>, EbpfStateLock>,
    // Registry of maps keyed by id. Weak handles, so the registry does not
    // keep maps alive by itself.
    maps: OrderedMutex<BTreeMap<BpfMapId, WeakBpfMapHandle>, EbpfStateLock>,
}
182
183impl EbpfState {
184    fn register_program<L>(&self, locked: &mut Locked<L>, program: &ProgramHandle)
185    where
186        L: LockBefore<EbpfStateLock>,
187    {
188        self.programs.lock(locked).insert(program.id(), Arc::downgrade(program));
189    }
190
191    fn unregister_program<L>(&self, locked: &mut Locked<L>, id: ProgramId)
192    where
193        L: LockBefore<EbpfStateLock>,
194    {
195        self.programs.lock(locked).remove(&id).expect("Missing eBPF program");
196    }
197
198    fn get_next_program_id<L>(
199        &self,
200        locked: &mut Locked<L>,
201        start_id: ProgramId,
202    ) -> Option<ProgramId>
203    where
204        L: LockBefore<EbpfStateLock>,
205    {
206        self.programs
207            .lock(locked)
208            .range((Bound::Excluded(start_id), Bound::Unbounded))
209            .next()
210            .map(|(k, _)| *k)
211    }
212
213    fn get_program_by_id<L>(&self, locked: &mut Locked<L>, id: ProgramId) -> Option<ProgramHandle>
214    where
215        L: LockBefore<EbpfStateLock>,
216    {
217        self.programs.lock(locked).get(&id).map(|p| p.upgrade()).flatten()
218    }
219
220    fn register_map<L>(&self, locked: &mut Locked<L>, map: &BpfMapHandle)
221    where
222        L: LockBefore<EbpfStateLock>,
223    {
224        self.maps.lock(locked).insert(map.id(), Arc::downgrade(map));
225    }
226
227    fn unregister_map<L>(&self, locked: &mut Locked<L>, id: BpfMapId)
228    where
229        L: LockBefore<EbpfStateLock>,
230    {
231        self.maps.lock(locked).remove(&id).expect("Missing eBPF map");
232    }
233
234    fn get_next_map_id<L>(&self, locked: &mut Locked<L>, start_id: BpfMapId) -> Option<BpfMapId>
235    where
236        L: LockBefore<EbpfStateLock>,
237    {
238        self.maps
239            .lock(locked)
240            .range((Bound::Excluded(start_id), Bound::Unbounded))
241            .next()
242            .map(|(k, _)| *k)
243    }
244
245    fn get_map_by_id<L>(&self, locked: &mut Locked<L>, id: BpfMapId) -> Option<BpfMapHandle>
246    where
247        L: LockBefore<EbpfStateLock>,
248    {
249        self.maps.lock(locked).get(&id).map(|p| p.upgrade()).flatten()
250    }
251}