ebpf/executor.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::visitor::{BpfVisitor, ProgramCounter, Register, Source};
use crate::{
    BPF_STACK_SIZE, BpfValue, DataWidth, EbpfInstruction, EbpfProgramContext, FromBpfValue,
    GENERAL_REGISTER_COUNT, HelperSet, Packet,
};
use byteorder::{BigEndian, ByteOrder, LittleEndian};
use std::mem::MaybeUninit;
use std::pin::Pin;
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use zerocopy::IntoBytes;

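/// Runs the eBPF program `code` and returns the value left in register r0 when it exits.
///
/// `arguments` are copied into registers r1 to r5 before execution starts. The program is
/// expected to have been checked by the verifier beforehand; any fault detected at run time
/// is treated as a bug and aborts with a panic.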
pub fn execute<C: EbpfProgramContext>(
    code: &[EbpfInstruction],
    helpers: &HelperSet<C>,
    run_context: &mut C::RunContext<'_>,
    arguments: &[BpfValue],
) -> u64 {
    assert!(arguments.len() <= 5);
    let mut context = ComputationContext {
        code,
        helpers,
        registers: Default::default(),
        stack: vec![MaybeUninit::uninit(); BPF_STACK_SIZE / std::mem::size_of::<BpfValue>()]
            .into_boxed_slice()
            .into(),
        pc: 0,
        result: None,
    };
    for (i, v) in arguments.iter().enumerate() {
        // Arguments are passed in registers r1 to r5.
        context.set_reg((i as u8) + 1, *v);
    }

    // R10 points to the top of the stack. The stack grows downwards from this address.
    context.registers[10] =
        BpfValue::from((context.stack.as_mut_ptr() as u64) + (BPF_STACK_SIZE as u64));

    loop {
        if let Some(result) = context.result {
            return result;
        }
        context.visit(run_context, code[context.pc]).expect("verifier should have found an issue");
        context.next();
    }
}

impl BpfValue {
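    /// Interprets the value as a 64-bit integer and returns it incremented by `offset`,
    /// wrapping on overflow. Used for address arithmetic below.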
    #[inline(always)]
    pub fn add(&self, offset: u64) -> Self {
        Self::from(self.as_u64().overflowing_add(offset).0)
    }
}

/// The state of the computation as known by the interpreter at a given point in time.
struct ComputationContext<'a, C: EbpfProgramContext> {
    /// The program being executed.
    code: &'a [EbpfInstruction],
    /// The helper functions available to the program.
    helpers: &'a HelperSet<C>,
    /// The general-purpose registers r0 to r9, followed by the frame pointer r10.
    registers: [BpfValue; GENERAL_REGISTER_COUNT as usize + 1],
    /// The state of the stack.
    stack: Pin<Box<[MaybeUninit<BpfValue>]>>,
    /// The program counter.
    pc: ProgramCounter,
    /// The result, set to Some(value) when the program terminates.
    result: Option<u64>,
}

impl<C: EbpfProgramContext> ComputationContext<'_, C> {
    #[inline(always)]
    fn reg(&mut self, index: Register) -> BpfValue {
        self.registers[index as usize]
    }

    #[inline(always)]
    fn set_reg(&mut self, index: Register, value: BpfValue) {
        self.registers[index as usize] = value;
    }

    #[inline(always)]
    fn next(&mut self) {
        self.advance_pc(1)
    }

    /// Adds `offset` to the program counter in `ComputationContext`.
    #[inline(always)]
    fn advance_pc(&mut self, offset: i16) {
        let mut pc = self.pc as i64;
        pc += offset as i64;
        self.pc = pc as usize;
    }

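    /// Writes `value` at `addr + instruction_offset` as an unaligned write of the given `width`.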
    #[inline(always)]
    fn store_memory(
        &mut self,
        addr: BpfValue,
        value: BpfValue,
        instruction_offset: u64,
        width: DataWidth,
    ) {
        // SAFETY
        //
        // The verifier has checked that the address points to memory that is valid
        // for writing.
        let addr = addr.add(instruction_offset);
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        match width {
            DataWidth::U8 => unsafe { std::ptr::write_unaligned(addr.as_ptr(), value.as_u8()) },
            DataWidth::U16 => unsafe { std::ptr::write_unaligned(addr.as_ptr(), value.as_u16()) },
            DataWidth::U32 => unsafe { std::ptr::write_unaligned(addr.as_ptr(), value.as_u32()) },
            DataWidth::U64 => unsafe { std::ptr::write_unaligned(addr.as_ptr(), value.as_u64()) },
        }
    }

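    /// Reads a value of the given `width` from `addr + instruction_offset` as an unaligned read.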
    #[inline(always)]
    fn load_memory(&self, addr: BpfValue, instruction_offset: u64, width: DataWidth) -> BpfValue {
        // SAFETY
        //
        // The verifier has checked that the address points to memory that is valid
        // for reading.
        let addr = addr.add(instruction_offset);
        match width {
            DataWidth::U8 =>
            {
                #[allow(
                    clippy::undocumented_unsafe_blocks,
                    reason = "Force documented unsafe blocks in Starnix"
                )]
                BpfValue::from(unsafe { std::ptr::read_unaligned(addr.as_ptr::<u8>()) })
            }
            DataWidth::U16 =>
            {
                #[allow(
                    clippy::undocumented_unsafe_blocks,
                    reason = "Force documented unsafe blocks in Starnix"
                )]
                BpfValue::from(unsafe { std::ptr::read_unaligned(addr.as_ptr::<u16>()) })
            }
            DataWidth::U32 =>
            {
                #[allow(
                    clippy::undocumented_unsafe_blocks,
                    reason = "Force documented unsafe blocks in Starnix"
                )]
                BpfValue::from(unsafe { std::ptr::read_unaligned(addr.as_ptr::<u32>()) })
            }
            DataWidth::U64 =>
            {
                #[allow(
                    clippy::undocumented_unsafe_blocks,
                    reason = "Force documented unsafe blocks in Starnix"
                )]
                BpfValue::from(unsafe { std::ptr::read_unaligned(addr.as_ptr::<u64>()) })
            }
        }
    }

    #[inline(always)]
    fn compute_source(&mut self, src: Source) -> BpfValue {
        match src {
            Source::Reg(reg) => self.reg(reg),
            Source::Value(v) => v.into(),
        }
    }

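    /// Applies `op` to the destination register and the source operand, storing the
    /// result back into the destination register.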
    #[inline(always)]
    fn alu(
        &mut self,
        dst: Register,
        src: Source,
        op: impl Fn(u64, u64) -> u64,
    ) -> Result<(), String> {
        let op1 = self.reg(dst).as_u64();
        let op2 = self.compute_source(src).as_u64();
        let result = op(op1, op2);
        self.set_reg(dst, result.into());
        Ok(())
    }

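    /// Performs a 32-bit atomic read-modify-write at `[dst + offset]`. When `fetch` is
    /// set, the value previously stored at that address is written back to `src`.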
    #[inline(always)]
    fn atomic_operation(
        &mut self,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
        op: impl Fn(&mut Self, &AtomicU32, u32) -> u32,
    ) -> Result<(), String> {
        let addr = self.reg(dst).add(offset as u64);
        // TODO: How to statically check alignment?
        if addr.as_usize() % std::mem::size_of::<AtomicU32>() != 0 {
            return Err("misaligned access".to_string());
        }
        // SAFETY
        //
        // The verifier has checked that the address points to memory that is valid
        // for reading and writing.
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let atomic = unsafe { &*addr.as_ptr::<AtomicU32>() };
        let value = self.reg(src).as_u32();
        let old_value = op(self, atomic, value);
        if fetch {
            self.set_reg(src, old_value.into());
        }
        Ok(())
    }

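    /// 64-bit variant of `atomic_operation`.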
    #[inline(always)]
    fn atomic_operation64(
        &mut self,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
        op: impl Fn(&mut Self, &AtomicU64, u64) -> u64,
    ) -> Result<(), String> {
        let addr = self.reg(dst).add(offset as u64);
        // TODO: How to statically check alignment?
        if addr.as_usize() % std::mem::size_of::<AtomicU64>() != 0 {
            return Err("misaligned access".to_string());
        }
        // SAFETY
        //
        // The verifier has checked that the address points to memory that is valid
        // for reading and writing.
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let atomic = unsafe { &*addr.as_ptr::<AtomicU64>() };
        let value = self.reg(src).as_u64();
        let old_value = op(self, atomic, value);
        if fetch {
            self.set_reg(src, old_value.into());
        }
        Ok(())
    }

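    /// Implements the `be`/`le` byte-swap instructions: converts the lower bits of `dst`
    /// selected by `width` from host byte order to the byte order `BO`, zero-extending
    /// the result.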
    #[inline(always)]
    fn endianness<BO: ByteOrder>(&mut self, dst: Register, width: DataWidth) -> Result<(), String> {
        let value = self.reg(dst);
        let new_value = match width {
            DataWidth::U16 => BO::read_u16((value.as_u64() as u16).as_bytes()) as u64,
            DataWidth::U32 => BO::read_u32((value.as_u64() as u32).as_bytes()) as u64,
            DataWidth::U64 => BO::read_u64(value.as_u64().as_bytes()),
            _ => {
                panic!("Unexpected bit width for endianness operation");
            }
        };
        self.set_reg(dst, new_value.into());
        Ok(())
    }

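    /// Evaluates `op` on the destination register and the source operand, and jumps by
    /// `offset` instructions when it returns true.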
    #[inline(always)]
    fn conditional_jump(
        &mut self,
        dst: Register,
        src: Source,
        offset: i16,
        op: impl Fn(u64, u64) -> bool,
    ) -> Result<(), String> {
        let op1 = self.reg(dst).as_u64();
        let op2 = self.compute_source(src).as_u64();
        if op(op1, op2) {
            self.advance_pc(offset);
        }
        Ok(())
    }
}

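// Each visitor callback below executes a single eBPF instruction. The 32-bit ALU
// operations work on the lower 32 bits of the operands and zero-extend their result
// into the destination register, as required by the eBPF instruction set.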
impl<C: EbpfProgramContext> BpfVisitor for ComputationContext<'_, C> {
    type Context<'a> = C::RunContext<'a>;

    #[inline(always)]
    fn add<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_add(y).0))
    }
    #[inline(always)]
    fn add64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x.overflowing_add(y).0)
    }
    #[inline(always)]
    fn and<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x & y))
    }
    #[inline(always)]
    fn and64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x & y)
    }
    #[inline(always)]
    fn arsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| {
            alu32(x, y, |x, y| {
                let x = x as i32;
                x.overflowing_shr(y).0 as u32
            })
        })
    }
    #[inline(always)]
    fn arsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| {
            let x = x as i64;
            if y > u32::MAX.into() {
                if x >= 0 { 0 } else { u64::MAX }
            } else {
                x.overflowing_shr(y as u32).0 as u64
            }
        })
    }
    #[inline(always)]
    fn div<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| if y == 0 { 0 } else { x / y }))
    }
    #[inline(always)]
    fn div64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| if y == 0 { 0 } else { x / y })
    }
    #[inline(always)]
    fn lsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_shl(y).0))
    }
    #[inline(always)]
    fn lsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x.overflowing_shl(y as u32).0)
    }
    #[inline(always)]
    fn r#mod<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| if y == 0 { x } else { x % y }))
    }
    #[inline(always)]
    fn mod64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| if y == 0 { x } else { x % y })
    }
    #[inline(always)]
    fn mov<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |_x, y| y))
    }
    #[inline(always)]
    fn mov64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |_x, y| y)
    }
    #[inline(always)]
    fn mul<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_mul(y).0))
    }
    #[inline(always)]
    fn mul64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x.overflowing_mul(y).0)
    }
    #[inline(always)]
    fn or<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x | y))
    }
    #[inline(always)]
    fn or64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x | y)
    }
    #[inline(always)]
    fn rsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_shr(y).0))
    }
    #[inline(always)]
    fn rsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x.overflowing_shr(y as u32).0)
    }
    #[inline(always)]
    fn sub<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_sub(y).0))
    }
    #[inline(always)]
    fn sub64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x.overflowing_sub(y).0)
    }
    #[inline(always)]
    fn xor<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x ^ y))
    }
    #[inline(always)]
    fn xor64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x ^ y)
    }

    #[inline(always)]
    fn neg<'a>(&mut self, _context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
        self.alu(dst, Source::Value(0), |x, y| {
            alu32(x, y, |x, _y| (x as i32).overflowing_neg().0 as u32)
        })
    }
    #[inline(always)]
    fn neg64<'a>(&mut self, _context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
        self.alu(dst, Source::Value(0), |x, _y| (x as i64).overflowing_neg().0 as u64)
    }

    #[inline(always)]
    fn be<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        self.endianness::<BigEndian>(dst, width)
    }

    #[inline(always)]
    fn le<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        self.endianness::<LittleEndian>(dst, width)
    }

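    /// Calls the helper function registered under `index`. Helper arguments are taken
    /// from registers r1 to r5 and the return value is stored in r0.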
    #[inline(always)]
    fn call_external<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        index: u32,
    ) -> Result<(), String> {
        let helper = &self.helpers.get_by_index(index).unwrap();
        let result =
            helper.0(context, self.reg(1), self.reg(2), self.reg(3), self.reg(4), self.reg(5));
        self.set_reg(0, result);
        Ok(())
    }

    #[inline(always)]
    fn exit<'a>(&mut self, _context: &mut Self::Context<'a>) -> Result<(), String> {
        self.result = Some(self.reg(0).as_u64());
        Ok(())
    }

    #[inline(always)]
    fn jump<'a>(&mut self, _context: &mut Self::Context<'a>, offset: i16) -> Result<(), String> {
        self.advance_pc(offset);
        Ok(())
    }

    #[inline(always)]
    fn jeq<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x == y))
    }
    #[inline(always)]
    fn jeq64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x == y)
    }
    #[inline(always)]
    fn jne<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x != y))
    }
    #[inline(always)]
    fn jne64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x != y)
    }
    #[inline(always)]
    fn jge<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x >= y))
    }
    #[inline(always)]
    fn jge64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x >= y)
    }
    #[inline(always)]
    fn jgt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x > y))
    }
    #[inline(always)]
    fn jgt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x > y)
    }
    #[inline(always)]
    fn jle<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x <= y))
    }
    #[inline(always)]
    fn jle64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x <= y)
    }
    #[inline(always)]
    fn jlt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x < y))
    }
    #[inline(always)]
    fn jlt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x < y)
    }
    #[inline(always)]
    fn jsge<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp32(x, y, |x, y| x >= y))
    }
    #[inline(always)]
    fn jsge64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp64(x, y, |x, y| x >= y))
    }
    #[inline(always)]
    fn jsgt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp32(x, y, |x, y| x > y))
    }
    #[inline(always)]
    fn jsgt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp64(x, y, |x, y| x > y))
    }
    #[inline(always)]
    fn jsle<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp32(x, y, |x, y| x <= y))
    }
    #[inline(always)]
    fn jsle64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp64(x, y, |x, y| x <= y))
    }
    #[inline(always)]
    fn jslt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp32(x, y, |x, y| x < y))
    }
    #[inline(always)]
    fn jslt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp64(x, y, |x, y| x < y))
    }
    #[inline(always)]
    fn jset<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x & y != 0))
    }
    #[inline(always)]
    fn jset64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x & y != 0)
    }

    #[inline(always)]
    fn atomic_add<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.fetch_add(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_add64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.fetch_add(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_and<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.fetch_and(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_and64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.fetch_and(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_or<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.fetch_or(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_or64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.fetch_or(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_xor<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.fetch_xor(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_xor64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.fetch_xor(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_xchg<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.swap(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_xchg64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.swap(v, Ordering::SeqCst))
    }

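    /// Atomic 32-bit compare-and-exchange: the value at `[dst + offset]` is compared
    /// with r0 and replaced with `src` when they match. The previous value is always
    /// written back to r0, which is why the `fetch` flag is not used here.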
    #[inline(always)]
    fn atomic_cmpxchg<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(false, dst, offset, src, |this, a, v| {
            let r0 = this.reg(0).as_u32();
            let r0 = match a.compare_exchange(r0, v, Ordering::SeqCst, Ordering::SeqCst) {
                Ok(v) | Err(v) => v,
            };
            this.set_reg(0, r0.into());
            0
        })
    }

    #[inline(always)]
    fn atomic_cmpxchg64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(false, dst, offset, src, |this, a, v| {
            let r0 = this.reg(0).as_u64();
            let r0 = match a.compare_exchange(r0, v, Ordering::SeqCst, Ordering::SeqCst) {
                Ok(v) | Err(v) => v,
            };
            this.set_reg(0, r0.into());
            0
        })
    }

    #[inline(always)]
    fn load<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        let addr = self.reg(src);
        let loaded = self.load_memory(addr, offset as u64, width);
        self.set_reg(dst, loaded);
        Ok(())
    }

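    /// Implements the double-word immediate load (`lddw`), which occupies two
    /// instruction slots: the upper 32 bits of the immediate are stored in the `imm`
    /// field of the following slot, which is consumed here by skipping it.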
    #[inline(always)]
    fn load64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        _src: u8,
        lower: u32,
    ) -> Result<(), String> {
        let value = (lower as u64) | (((self.code[self.pc + 1].imm() as u32) as u64) << 32);
        self.set_reg(dst, value.into());
        self.advance_pc(1);
        Ok(())
    }

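    /// Implements the legacy packet-load instructions (absolute and indirect). When the
    /// computed offset overflows or the access falls outside the packet, the program
    /// exits with the current value of r0.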
    #[inline(always)]
    fn load_from_packet<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst_reg: Register,
        src_reg: Register,
        offset: i32,
        register_offset: Option<Register>,
        width: DataWidth,
    ) -> Result<(), String> {
        let Some(offset) =
            register_offset.map(|r| self.reg(r).as_i32()).unwrap_or(0).checked_add(offset as i32)
        else {
            // Offset overflowed. Exit.
            self.result = Some(self.reg(0).as_u64());
            return Ok(());
        };
        let src_reg = self.reg(src_reg);
        // SAFETY: The verifier has checked that `src_reg` points at the packet.
        let packet = unsafe { C::Packet::from_bpf_value(context, src_reg) };
        if let Some(value) = packet.load(offset, width) {
            self.set_reg(dst_reg, value.into());
        } else {
            self.result = Some(self.reg(0).as_u64());
        }
        Ok(())
    }

    #[inline(always)]
    fn store<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Source,
        width: DataWidth,
    ) -> Result<(), String> {
        let src = self.compute_source(src);
        let dst = self.reg(dst);
        self.store_memory(dst, src, offset as u64, width);
        Ok(())
    }
}

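/// Applies a 32-bit operation to the lower 32 bits of both operands and zero-extends
/// the result back to 64 bits.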
#[inline(always)]
fn alu32(x: u64, y: u64, op: impl FnOnce(u32, u32) -> u32) -> u64 {
    op(x as u32, y as u32) as u64
}

#[inline(always)]
fn comp32(x: u64, y: u64, op: impl FnOnce(u32, u32) -> bool) -> bool {
    op(x as u32, y as u32)
}

#[inline(always)]
fn scomp64(x: u64, y: u64, op: impl FnOnce(i64, i64) -> bool) -> bool {
    op(x as i64, y as i64)
}

#[inline(always)]
fn scomp32(x: u64, y: u64, op: impl FnOnce(i32, i32) -> bool) -> bool {
    op(x as i32, y as i32)
}