ebpf/executor.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::visitor::{BpfVisitor, ProgramCounter, Register, Source};
use crate::{
    BPF_STACK_SIZE, BpfValue, DataWidth, EbpfInstruction, EbpfProgramContext, FromBpfValue,
    GENERAL_REGISTER_COUNT, HelperSet, Packet,
};
use byteorder::{BigEndian, ByteOrder, LittleEndian};
use std::mem::MaybeUninit;
use std::pin::Pin;
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use zerocopy::IntoBytes;

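/// Executes the verified eBPF program `code` until it exits and returns the final value of R0.
///
/// The `arguments` are copied into registers R1 and up, R10 is set up as the frame pointer for a
/// freshly allocated stack, and helper calls are resolved through `helpers`.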
pub fn execute<C: EbpfProgramContext>(
    code: &[EbpfInstruction],
    helpers: &HelperSet<C>,
    run_context: &mut C::RunContext<'_>,
    arguments: &[BpfValue],
) -> u64 {
    assert!(arguments.len() <= 5);
    let mut context = ComputationContext {
        code,
        helpers,
        registers: Default::default(),
        stack: vec![MaybeUninit::uninit(); BPF_STACK_SIZE / std::mem::size_of::<BpfValue>()]
            .into_boxed_slice()
            .into(),
        pc: 0,
        result: None,
    };
    for (i, v) in arguments.iter().enumerate() {
        // Arguments are in registers r1 to r5.
        context.set_reg((i as u8) + 1, *v);
    }

    // R10 is the frame pointer: it points to the end of the stack, which is addressed with
    // negative offsets.
    context.registers[10] =
        BpfValue::from((context.stack.as_mut_ptr() as u64) + (BPF_STACK_SIZE as u64));

    loop {
        if let Some(result) = context.result {
            return result;
        }
        context.visit(run_context, code[context.pc]).expect("verifier should have found an issue");
        context.next();
    }
}

impl BpfValue {
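    /// Returns this value, interpreted as a 64-bit integer, offset by `offset` with wrapping
    /// arithmetic. Used by the interpreter for pointer arithmetic.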
    #[inline(always)]
    pub fn add(&self, offset: u64) -> Self {
        Self::from(self.as_u64().overflowing_add(offset).0)
    }
}

/// The state of the computation as known by the interpreter at a given point in time.
struct ComputationContext<'a, C: EbpfProgramContext> {
    /// The program being executed.
    code: &'a [EbpfInstruction],
    /// Helpers.
    helpers: &'a HelperSet<C>,
    /// Registers.
    registers: [BpfValue; GENERAL_REGISTER_COUNT as usize + 1],
    /// The state of the stack.
    stack: Pin<Box<[MaybeUninit<BpfValue>]>>,
    /// The program counter.
    pc: ProgramCounter,
    /// The result, set to Some(value) when the program terminates.
    result: Option<u64>,
}

impl<C: EbpfProgramContext> ComputationContext<'_, C> {
    #[inline(always)]
    fn reg(&mut self, index: Register) -> BpfValue {
        self.registers[index as usize]
    }

    #[inline(always)]
    fn set_reg(&mut self, index: Register, value: BpfValue) {
        self.registers[index as usize] = value;
    }

    #[inline(always)]
    fn next(&mut self) {
        self.advance_pc(1)
    }

    /// Adds `offset` to the program counter in `ComputationContext`.
    #[inline(always)]
    fn advance_pc(&mut self, offset: i16) {
        let mut pc = self.pc as i64;
        pc += offset as i64;
        self.pc = pc as usize;
    }

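    /// Writes `value`, truncated to `width`, at `addr + instruction_offset`.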
    #[inline(always)]
    fn store_memory(
        &mut self,
        addr: BpfValue,
        value: BpfValue,
        instruction_offset: u64,
        width: DataWidth,
    ) {
        // SAFETY
        //
        // The verifier has checked that this address points to memory that is valid for
        // writing.
        let addr = addr.add(instruction_offset);
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        match width {
            DataWidth::U8 => unsafe { std::ptr::write_unaligned(addr.as_ptr(), value.as_u8()) },
            DataWidth::U16 => unsafe { std::ptr::write_unaligned(addr.as_ptr(), value.as_u16()) },
            DataWidth::U32 => unsafe { std::ptr::write_unaligned(addr.as_ptr(), value.as_u32()) },
            DataWidth::U64 => unsafe { std::ptr::write_unaligned(addr.as_ptr(), value.as_u64()) },
        }
    }

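    /// Reads a value of the given `width` from `addr + instruction_offset`.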
    #[inline(always)]
    fn load_memory(&self, addr: BpfValue, instruction_offset: u64, width: DataWidth) -> BpfValue {
        // SAFETY
        //
        // The verifier has checked that this address points to memory that is valid for
        // reading.
        let addr = addr.add(instruction_offset);
        match width {
            DataWidth::U8 =>
            {
                #[allow(
                    clippy::undocumented_unsafe_blocks,
                    reason = "Force documented unsafe blocks in Starnix"
                )]
                BpfValue::from(unsafe { std::ptr::read_unaligned(addr.as_ptr::<u8>()) })
            }
            DataWidth::U16 =>
            {
                #[allow(
                    clippy::undocumented_unsafe_blocks,
                    reason = "Force documented unsafe blocks in Starnix"
                )]
                BpfValue::from(unsafe { std::ptr::read_unaligned(addr.as_ptr::<u16>()) })
            }
            DataWidth::U32 =>
            {
                #[allow(
                    clippy::undocumented_unsafe_blocks,
                    reason = "Force documented unsafe blocks in Starnix"
                )]
                BpfValue::from(unsafe { std::ptr::read_unaligned(addr.as_ptr::<u32>()) })
            }
            DataWidth::U64 =>
            {
                #[allow(
                    clippy::undocumented_unsafe_blocks,
                    reason = "Force documented unsafe blocks in Starnix"
                )]
                BpfValue::from(unsafe { std::ptr::read_unaligned(addr.as_ptr::<u64>()) })
            }
        }
    }

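    /// Resolves an instruction source operand: either the current value of a register or an
    /// immediate value.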
    #[inline(always)]
    fn compute_source(&mut self, src: Source) -> BpfValue {
        match src {
            Source::Reg(reg) => self.reg(reg),
            Source::Value(v) => v.into(),
        }
    }

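    /// Applies the binary operation `op` to `dst` and the resolved `src`, storing the result back
    /// into `dst`.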
    #[inline(always)]
    fn alu(
        &mut self,
        dst: Register,
        src: Source,
        op: impl Fn(u64, u64) -> u64,
    ) -> Result<(), String> {
        let op1 = self.reg(dst).as_u64();
        let op2 = self.compute_source(src).as_u64();
        let result = op(op1, op2);
        self.set_reg(dst, result.into());
        Ok(())
    }

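    /// Applies a 32-bit atomic operation to the memory at `dst + offset`, using the value of
    /// `src` as the operand. When `fetch` is set, the previous value in memory is written back to
    /// `src`.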
    #[inline(always)]
    fn atomic_operation(
        &mut self,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
        op: impl Fn(&mut Self, &AtomicU32, u32) -> u32,
    ) -> Result<(), String> {
        let addr = self.reg(dst).add(offset as u64);
        // TODO: How to statically check alignment?
        if addr.as_usize() % std::mem::size_of::<AtomicU32>() != 0 {
            return Err("misaligned access".to_string());
        }
        // SAFETY
        //
        // The verifier has checked that this address points to memory that is valid for
        // reading and writing.
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let atomic = unsafe { &*addr.as_ptr::<AtomicU32>() };
        let value = self.reg(src).as_u32();
        let old_value = op(self, atomic, value);
        if fetch {
            self.set_reg(src, old_value.into());
        }
        Ok(())
    }

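    /// 64-bit variant of `atomic_operation`, operating on an `AtomicU64` at `dst + offset`.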
    #[inline(always)]
    fn atomic_operation64(
        &mut self,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
        op: impl Fn(&mut Self, &AtomicU64, u64) -> u64,
    ) -> Result<(), String> {
        let addr = self.reg(dst).add(offset as u64);
        // TODO: How to statically check alignment?
        if addr.as_usize() % std::mem::size_of::<AtomicU64>() != 0 {
            return Err("misaligned access".to_string());
        }
        // SAFETY
        //
        // The verifier has checked that this address points to memory that is valid for
        // reading and writing.
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let atomic = unsafe { &*addr.as_ptr::<AtomicU64>() };
        let value = self.reg(src).as_u64();
        let old_value = op(self, atomic, value);
        if fetch {
            self.set_reg(src, old_value.into());
        }
        Ok(())
    }

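    /// Reads the low `width` bits of `dst` as raw bytes, reinterprets them in byte order `BO` and
    /// stores the zero-extended result back into `dst`, implementing the `be`/`le` instructions.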
    #[inline(always)]
    fn endianness<BO: ByteOrder>(&mut self, dst: Register, width: DataWidth) -> Result<(), String> {
        let value = self.reg(dst);
        let new_value = match width {
            DataWidth::U16 => BO::read_u16((value.as_u64() as u16).as_bytes()) as u64,
            DataWidth::U32 => BO::read_u32((value.as_u64() as u32).as_bytes()) as u64,
            DataWidth::U64 => BO::read_u64(value.as_u64().as_bytes()),
            _ => {
                panic!("Unexpected bit width for endianness operation");
            }
        };
        self.set_reg(dst, new_value.into());
        Ok(())
    }

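    /// Evaluates the predicate `op` on `dst` and the resolved `src`, and adds `offset` to the
    /// program counter when it holds.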
    #[inline(always)]
    fn conditional_jump(
        &mut self,
        dst: Register,
        src: Source,
        offset: i16,
        op: impl Fn(u64, u64) -> bool,
    ) -> Result<(), String> {
        let op1 = self.reg(dst).as_u64();
        let op2 = self.compute_source(src.clone()).as_u64();
        if op(op1, op2) {
            self.advance_pc(offset);
        }
        Ok(())
    }
}

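// Each method below implements a single eBPF instruction. The 32-bit ALU and comparison variants
// delegate to the `alu32`/`comp32`/`scomp*` helpers defined at the bottom of this file.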
impl<C: EbpfProgramContext> BpfVisitor for ComputationContext<'_, C> {
    type Context<'a> = C::RunContext<'a>;

    #[inline(always)]
    fn add<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_add(y).0))
    }
    #[inline(always)]
    fn add64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x.overflowing_add(y).0)
    }
    #[inline(always)]
    fn and<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x & y))
    }
    #[inline(always)]
    fn and64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x & y)
    }
    #[inline(always)]
    fn arsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| {
            alu32(x, y, |x, y| {
                let x = x as i32;
                x.overflowing_shr(y).0 as u32
            })
        })
    }
    #[inline(always)]
    fn arsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| {
            let x = x as i64;
            // eBPF masks the shift amount to the bit width of the operand, as does Rust's
            // `overflowing_sh{l,r}`, so it is valid to just truncate it to `u32`.
            let y = y as u32;
            x.overflowing_shr(y).0 as u64
        })
    }
    #[inline(always)]
    fn div<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| if y == 0 { 0 } else { x / y }))
    }
    #[inline(always)]
    fn div64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| if y == 0 { 0 } else { x / y })
    }
    #[inline(always)]
    fn lsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_shl(y).0))
    }
    #[inline(always)]
    fn lsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| {
            // eBPF masks the shift amount to the bit width of the operand, as does Rust's
            // `overflowing_sh{l,r}`, so it is valid to just truncate it to `u32`.
            let y = y as u32;
            x.overflowing_shl(y).0
        })
    }
    #[inline(always)]
    fn r#mod<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| if y == 0 { x } else { x % y }))
    }
    #[inline(always)]
    fn mod64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| if y == 0 { x } else { x % y })
    }
    #[inline(always)]
    fn mov<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |_x, y| y))
    }
    #[inline(always)]
    fn mov64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |_x, y| y)
    }
    #[inline(always)]
    fn mul<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_mul(y).0))
    }
    #[inline(always)]
    fn mul64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x.overflowing_mul(y).0)
    }
    #[inline(always)]
    fn or<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x | y))
    }
    #[inline(always)]
    fn or64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x | y)
    }
    #[inline(always)]
    fn rsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_shr(y).0))
    }
    #[inline(always)]
    fn rsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| {
            // eBPF masks the shift amount to the bit width of the operand, as does Rust's
            // `overflowing_sh{l,r}`, so it is valid to just truncate it to `u32`.
            let y = y as u32;
            x.overflowing_shr(y).0
        })
    }
    #[inline(always)]
    fn sub<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_sub(y).0))
    }
    #[inline(always)]
    fn sub64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x.overflowing_sub(y).0)
    }
    #[inline(always)]
    fn xor<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x ^ y))
    }
    #[inline(always)]
    fn xor64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x ^ y)
    }

    #[inline(always)]
    fn neg<'a>(&mut self, _context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
        self.alu(dst, Source::Value(0), |x, y| {
            alu32(x, y, |x, _y| (x as i32).overflowing_neg().0 as u32)
        })
    }
    #[inline(always)]
    fn neg64<'a>(&mut self, _context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
        self.alu(dst, Source::Value(0), |x, _y| (x as i64).overflowing_neg().0 as u64)
    }

    #[inline(always)]
    fn be<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        self.endianness::<BigEndian>(dst, width)
    }

    #[inline(always)]
    fn le<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        self.endianness::<LittleEndian>(dst, width)
    }

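    /// Calls the helper registered at `index` with the arguments held in R1-R5 and stores its
    /// result in R0.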
    #[inline(always)]
    fn call_external<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        index: u32,
    ) -> Result<(), String> {
        let helper = &self.helpers.get_by_index(index).unwrap();
        let result =
            helper.0(context, self.reg(1), self.reg(2), self.reg(3), self.reg(4), self.reg(5));
        self.set_reg(0, result);
        Ok(())
    }

    #[inline(always)]
    fn exit<'a>(&mut self, _context: &mut Self::Context<'a>) -> Result<(), String> {
        self.result = Some(self.reg(0).as_u64());
        Ok(())
    }

    #[inline(always)]
    fn jump<'a>(&mut self, _context: &mut Self::Context<'a>, offset: i16) -> Result<(), String> {
        self.advance_pc(offset);
        Ok(())
    }

    #[inline(always)]
    fn jeq<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x == y))
    }
    #[inline(always)]
    fn jeq64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x == y)
    }
    #[inline(always)]
    fn jne<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x != y))
    }
    #[inline(always)]
    fn jne64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x != y)
    }
    #[inline(always)]
    fn jge<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x >= y))
    }
    #[inline(always)]
    fn jge64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x >= y)
    }
    #[inline(always)]
    fn jgt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x > y))
    }
    #[inline(always)]
    fn jgt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x > y)
    }
    #[inline(always)]
    fn jle<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x <= y))
    }
    #[inline(always)]
    fn jle64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x <= y)
    }
    #[inline(always)]
    fn jlt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x < y))
    }
    #[inline(always)]
    fn jlt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x < y)
    }
    #[inline(always)]
    fn jsge<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp32(x, y, |x, y| x >= y))
    }
    #[inline(always)]
    fn jsge64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp64(x, y, |x, y| x >= y))
    }
    #[inline(always)]
    fn jsgt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp32(x, y, |x, y| x > y))
    }
    #[inline(always)]
    fn jsgt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp64(x, y, |x, y| x > y))
    }
    #[inline(always)]
    fn jsle<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp32(x, y, |x, y| x <= y))
    }
    #[inline(always)]
    fn jsle64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp64(x, y, |x, y| x <= y))
    }
    #[inline(always)]
    fn jslt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp32(x, y, |x, y| x < y))
    }
    #[inline(always)]
    fn jslt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp64(x, y, |x, y| x < y))
    }
    #[inline(always)]
    fn jset<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x & y != 0))
    }
    #[inline(always)]
    fn jset64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x & y != 0)
    }

    #[inline(always)]
    fn atomic_add<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.fetch_add(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_add64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.fetch_add(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_and<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.fetch_and(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_and64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.fetch_and(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_or<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.fetch_or(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_or64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.fetch_or(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_xor<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.fetch_xor(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_xor64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.fetch_xor(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_xchg<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.swap(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_xchg64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.swap(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_cmpxchg<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(false, dst, offset, src, |this, a, v| {
            let r0 = this.reg(0).as_u32();
            let r0 = match a.compare_exchange(r0, v, Ordering::SeqCst, Ordering::SeqCst) {
                Ok(v) | Err(v) => v,
            };
            this.set_reg(0, r0.into());
            0
        })
    }

    #[inline(always)]
    fn atomic_cmpxchg64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(false, dst, offset, src, |this, a, v| {
            let r0 = this.reg(0).as_u64();
            let r0 = match a.compare_exchange(r0, v, Ordering::SeqCst, Ordering::SeqCst) {
                Ok(v) | Err(v) => v,
            };
            this.set_reg(0, r0.into());
            0
        })
    }

    #[inline(always)]
    fn load<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        let addr = self.reg(src);
        let loaded = self.load_memory(addr, offset as u64, width);
        self.set_reg(dst, loaded);
        Ok(())
    }

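    /// Implements the `lddw` instruction: the 64-bit immediate spans two instruction slots, so the
    /// lower 32 bits come from the current instruction and the upper 32 bits from the `imm` field
    /// of the next one, which is then skipped.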
    #[inline(always)]
    fn load64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        _src: u8,
        lower: u32,
    ) -> Result<(), String> {
        let value = (lower as u64) | (((self.code[self.pc + 1].imm() as u32) as u64) << 32);
        self.set_reg(dst, value.into());
        self.advance_pc(1);
        Ok(())
    }

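    /// Implements the legacy packet load instructions: reads `width` bytes from the packet pointed
    /// to by `src_reg`, at `offset` plus an optional per-register offset. If the offset overflows
    /// or the load falls outside the packet, the program exits with the current value of R0.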
    #[inline(always)]
    fn load_from_packet<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst_reg: Register,
        src_reg: Register,
        offset: i32,
        register_offset: Option<Register>,
        width: DataWidth,
    ) -> Result<(), String> {
        let Some(offset) =
            register_offset.map(|r| self.reg(r).as_i32()).unwrap_or(0).checked_add(offset as i32)
        else {
            // Offset overflowed. Exit.
            self.result = Some(self.reg(0).as_u64());
            return Ok(());
        };
        let src_reg = self.reg(src_reg);
        // SAFETY: The verifier checks that `src_reg` points to the packet.
        let packet = unsafe { C::Packet::from_bpf_value(context, src_reg) };
        if let Some(value) = packet.load(offset, width) {
            self.set_reg(dst_reg, value.into());
        } else {
            self.result = Some(self.reg(0).as_u64());
        }
        Ok(())
    }

    #[inline(always)]
    fn store<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Source,
        width: DataWidth,
    ) -> Result<(), String> {
        let src = self.compute_source(src);
        let dst = self.reg(dst);
        self.store_memory(dst, src, offset as u64, width);
        Ok(())
    }
}

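/// Applies a 32-bit ALU operation: both operands are truncated to their low 32 bits and the result
/// is zero-extended to 64 bits, matching eBPF's 32-bit ALU semantics. For example,
/// `alu32(0xffff_ffff_0000_0001, 2, |x, y| x + y)` yields `3`.
///
/// The `comp32`, `scomp64` and `scomp32` helpers below apply the same truncation (or signed
/// reinterpretation) to both operands before evaluating a comparison.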
#[inline(always)]
fn alu32(x: u64, y: u64, op: impl FnOnce(u32, u32) -> u32) -> u64 {
    op(x as u32, y as u32) as u64
}

#[inline(always)]
fn comp32(x: u64, y: u64, op: impl FnOnce(u32, u32) -> bool) -> bool {
    op(x as u32, y as u32)
}

#[inline(always)]
fn scomp64(x: u64, y: u64, op: impl FnOnce(i64, i64) -> bool) -> bool {
    op(x as i64, y as i64)
}

#[inline(always)]
fn scomp32(x: u64, y: u64, op: impl FnOnce(i32, i32) -> bool) -> bool {
    op(x as i32, y as i32)
}