use crate::visitor::{BpfVisitor, ProgramCounter, Register, Source};
use crate::{
    BPF_STACK_SIZE, BpfValue, DataWidth, EbpfInstruction, EbpfProgramContext, FromBpfValue,
    GENERAL_REGISTER_COUNT, HelperSet, Packet,
};
use byteorder::{BigEndian, ByteOrder, LittleEndian};
use std::mem::MaybeUninit;
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use zerocopy::IntoBytes;

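/// Interprets `code` until the program exits and returns the final value of register `r0`.
///
/// The provided `arguments` are copied into the argument registers starting at `r1`, and `r10`
/// is set up as the frame pointer. The code is assumed to have been validated by the verifier
/// beforehand.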
pub fn execute<C: EbpfProgramContext>(
    code: &[EbpfInstruction],
    helpers: &HelperSet<C>,
    run_context: &mut C::RunContext<'_>,
    arguments: &[BpfValue],
) -> u64 {
    assert!(arguments.len() <= 5);
    let mut context = ComputationContext {
        code,
        helpers,
        registers: Default::default(),
        stack: [MaybeUninit::uninit(); BPF_STACK_SIZE / std::mem::size_of::<BpfValue>()],
        pc: 0,
        result: None,
    };
    for (i, v) in arguments.iter().enumerate() {
        context.set_reg((i as u8) + 1, *v);
    }

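    // `r10` is the frame pointer: it points one past the end of the stack, which grows
    // downwards.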
    context.registers[10] =
        BpfValue::from((context.stack.as_mut_ptr() as u64) + (BPF_STACK_SIZE as u64));

    loop {
        if let Some(result) = context.result {
            return result;
        }
        context.visit(run_context, code[context.pc]).expect("verifier should have found an issue");
        context.next();
    }
}

impl BpfValue {
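    /// Adds `offset` to the value, wrapping on overflow. Used for pointer arithmetic on
    /// addresses stored in registers.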
    #[inline(always)]
    pub fn add(&self, offset: u64) -> Self {
        Self::from(self.as_u64().overflowing_add(offset).0)
    }
}

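/// The state of the interpreter while a program runs: the instruction stream, the registered
/// helper functions, the general purpose registers, the stack, the current program counter, and
/// the final result once the program has exited.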
struct ComputationContext<'a, C: EbpfProgramContext> {
    code: &'a [EbpfInstruction],
    helpers: &'a HelperSet<C>,
    registers: [BpfValue; GENERAL_REGISTER_COUNT as usize + 1],
    stack: [MaybeUninit<BpfValue>; BPF_STACK_SIZE / std::mem::size_of::<BpfValue>()],
    pc: ProgramCounter,
    result: Option<u64>,
}

impl<C: EbpfProgramContext> ComputationContext<'_, C> {
    #[inline(always)]
    fn reg(&mut self, index: Register) -> BpfValue {
        self.registers[index as usize]
    }

    #[inline(always)]
    fn set_reg(&mut self, index: Register, value: BpfValue) {
        self.registers[index as usize] = value;
    }

    #[inline(always)]
    fn next(&mut self) {
        self.advance_pc(1)
    }

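    /// Moves the program counter by `offset` instructions. Jump instructions call this in
    /// addition to the `next()` performed after every instruction, so jump offsets are relative
    /// to the instruction following the jump, as eBPF requires.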
    #[inline(always)]
    fn advance_pc(&mut self, offset: i16) {
        let mut pc = self.pc as i64;
        pc += offset as i64;
        self.pc = pc as usize;
    }

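    /// Writes `value` at `addr + instruction_offset` with the given width. The address is not
    /// re-checked here: the verifier is responsible for proving that the access is valid.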
    #[inline(always)]
    fn store_memory(
        &mut self,
        addr: BpfValue,
        value: BpfValue,
        instruction_offset: u64,
        width: DataWidth,
    ) {
        let addr = addr.add(instruction_offset);
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        match width {
            DataWidth::U8 => unsafe { std::ptr::write_unaligned(addr.as_ptr(), value.as_u8()) },
            DataWidth::U16 => unsafe { std::ptr::write_unaligned(addr.as_ptr(), value.as_u16()) },
            DataWidth::U32 => unsafe { std::ptr::write_unaligned(addr.as_ptr(), value.as_u32()) },
            DataWidth::U64 => unsafe { std::ptr::write_unaligned(addr.as_ptr(), value.as_u64()) },
        }
    }

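    /// Reads a value of the given width from `addr + instruction_offset`. As with
    /// `store_memory`, the verifier must have validated the access.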
    #[inline(always)]
    fn load_memory(&self, addr: BpfValue, instruction_offset: u64, width: DataWidth) -> BpfValue {
        let addr = addr.add(instruction_offset);
        match width {
            DataWidth::U8 =>
            {
                #[allow(
                    clippy::undocumented_unsafe_blocks,
                    reason = "Force documented unsafe blocks in Starnix"
                )]
                BpfValue::from(unsafe { std::ptr::read_unaligned(addr.as_ptr::<u8>()) })
            }
            DataWidth::U16 =>
            {
                #[allow(
                    clippy::undocumented_unsafe_blocks,
                    reason = "Force documented unsafe blocks in Starnix"
                )]
                BpfValue::from(unsafe { std::ptr::read_unaligned(addr.as_ptr::<u16>()) })
            }
            DataWidth::U32 =>
            {
                #[allow(
                    clippy::undocumented_unsafe_blocks,
                    reason = "Force documented unsafe blocks in Starnix"
                )]
                BpfValue::from(unsafe { std::ptr::read_unaligned(addr.as_ptr::<u32>()) })
            }
            DataWidth::U64 =>
            {
                #[allow(
                    clippy::undocumented_unsafe_blocks,
                    reason = "Force documented unsafe blocks in Starnix"
                )]
                BpfValue::from(unsafe { std::ptr::read_unaligned(addr.as_ptr::<u64>()) })
            }
        }
    }

    #[inline(always)]
    fn compute_source(&mut self, src: Source) -> BpfValue {
        match src {
            Source::Reg(reg) => self.reg(reg),
            Source::Value(v) => v.into(),
        }
    }

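    /// Applies a binary ALU operation: `dst = op(dst, src)`. 32-bit variants pass an `op` that
    /// truncates its operands and zero-extends its result via `alu32`.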
    #[inline(always)]
    fn alu(
        &mut self,
        dst: Register,
        src: Source,
        op: impl Fn(u64, u64) -> u64,
    ) -> Result<(), String> {
        let op1 = self.reg(dst).as_u64();
        let op2 = self.compute_source(src).as_u64();
        let result = op(op1, op2);
        self.set_reg(dst, result.into());
        Ok(())
    }

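    /// Performs a 32-bit atomic read-modify-write at `dst + offset` using the value in `src`.
    /// When `fetch` is set, the previous value of the memory location is written back to `src`.
    /// Misaligned addresses are rejected.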
    #[inline(always)]
    fn atomic_operation(
        &mut self,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
        op: impl Fn(&mut Self, &AtomicU32, u32) -> u32,
    ) -> Result<(), String> {
        let addr = self.reg(dst).add(offset as u64);
        if addr.as_usize() % std::mem::size_of::<AtomicU32>() != 0 {
            return Err("misaligned access".to_string());
        }
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let atomic = unsafe { &*addr.as_ptr::<AtomicU32>() };
        let value = self.reg(src).as_u32();
        let old_value = op(self, atomic, value);
        if fetch {
            self.set_reg(src, old_value.into());
        }
        Ok(())
    }

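    /// 64-bit counterpart of `atomic_operation`.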
    #[inline(always)]
    fn atomic_operation64(
        &mut self,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
        op: impl Fn(&mut Self, &AtomicU64, u64) -> u64,
    ) -> Result<(), String> {
        let addr = self.reg(dst).add(offset as u64);
        if addr.as_usize() % std::mem::size_of::<AtomicU64>() != 0 {
            return Err("misaligned access".to_string());
        }
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let atomic = unsafe { &*addr.as_ptr::<AtomicU64>() };
        let value = self.reg(src).as_u64();
        let old_value = op(self, atomic, value);
        if fetch {
            self.set_reg(src, old_value.into());
        }
        Ok(())
    }

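    /// Implements the `be`/`le` byte-swap instructions: reinterprets the low `width` bits of
    /// `dst` in the requested byte order and zero-extends the result.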
    #[inline(always)]
    fn endianness<BO: ByteOrder>(&mut self, dst: Register, width: DataWidth) -> Result<(), String> {
        let value = self.reg(dst);
        let new_value = match width {
            DataWidth::U16 => BO::read_u16((value.as_u64() as u16).as_bytes()) as u64,
            DataWidth::U32 => BO::read_u32((value.as_u64() as u32).as_bytes()) as u64,
            DataWidth::U64 => BO::read_u64(value.as_u64().as_bytes()),
            _ => {
                panic!("Unexpected bit width for endianness operation");
            }
        };
        self.set_reg(dst, new_value.into());
        Ok(())
    }

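    /// Evaluates `op(dst, src)` and, when it holds, jumps by `offset` (relative to the next
    /// instruction, since `next()` still runs afterwards).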
    #[inline(always)]
    fn conditional_jump(
        &mut self,
        dst: Register,
        src: Source,
        offset: i16,
        op: impl Fn(u64, u64) -> bool,
    ) -> Result<(), String> {
        let op1 = self.reg(dst).as_u64();
        let op2 = self.compute_source(src).as_u64();
        if op(op1, op2) {
            self.advance_pc(offset);
        }
        Ok(())
    }
}

impl<C: EbpfProgramContext> BpfVisitor for ComputationContext<'_, C> {
    type Context<'a> = C::RunContext<'a>;

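    // 32-bit ALU instructions operate on the low 32 bits of their operands and zero-extend the
    // result into the destination register; the 64-bit variants operate on the full registers.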
    #[inline(always)]
    fn add<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_add(y).0))
    }
    #[inline(always)]
    fn add64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x.overflowing_add(y).0)
    }
    #[inline(always)]
    fn and<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x & y))
    }
    #[inline(always)]
    fn and64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x & y)
    }
    #[inline(always)]
    fn arsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| {
            alu32(x, y, |x, y| {
                let x = x as i32;
                x.overflowing_shr(y).0 as u32
            })
        })
    }
    #[inline(always)]
    fn arsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| {
            let x = x as i64;
            let y = y as u32;
            x.overflowing_shr(y).0 as u64
        })
    }
    #[inline(always)]
    fn div<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| if y == 0 { 0 } else { x / y }))
    }
    #[inline(always)]
    fn div64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| if y == 0 { 0 } else { x / y })
    }
    #[inline(always)]
    fn lsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_shl(y).0))
    }
    #[inline(always)]
    fn lsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| {
            let y = y as u32;
            x.overflowing_shl(y).0
        })
    }
    #[inline(always)]
    fn r#mod<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| if y == 0 { x } else { x % y }))
    }
    #[inline(always)]
    fn mod64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| if y == 0 { x } else { x % y })
    }
    #[inline(always)]
    fn mov<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |_x, y| y))
    }
    #[inline(always)]
    fn mov64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |_x, y| y)
    }
    #[inline(always)]
    fn mul<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_mul(y).0))
    }
    #[inline(always)]
    fn mul64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x.overflowing_mul(y).0)
    }
    #[inline(always)]
    fn or<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x | y))
    }
    #[inline(always)]
    fn or64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x | y)
    }
    #[inline(always)]
    fn rsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_shr(y).0))
    }
    #[inline(always)]
    fn rsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| {
            let y = y as u32;
            x.overflowing_shr(y).0
        })
    }
    #[inline(always)]
    fn sub<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x.overflowing_sub(y).0))
    }
    #[inline(always)]
    fn sub64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x.overflowing_sub(y).0)
    }
    #[inline(always)]
    fn xor<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| alu32(x, y, |x, y| x ^ y))
    }
    #[inline(always)]
    fn xor64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src, |x, y| x ^ y)
    }

    #[inline(always)]
    fn neg<'a>(&mut self, _context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
        self.alu(dst, Source::Value(0), |x, y| {
            alu32(x, y, |x, _y| (x as i32).overflowing_neg().0 as u32)
        })
    }
    #[inline(always)]
    fn neg64<'a>(&mut self, _context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
        self.alu(dst, Source::Value(0), |x, _y| (x as i64).overflowing_neg().0 as u64)
    }

    #[inline(always)]
    fn be<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        self.endianness::<BigEndian>(dst, width)
    }

    #[inline(always)]
    fn le<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        self.endianness::<LittleEndian>(dst, width)
    }

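    /// Calls the helper registered at `index`, passing registers `r1`..`r5` as arguments and
    /// storing the returned value in `r0`.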
    #[inline(always)]
    fn call_external<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        index: u32,
    ) -> Result<(), String> {
        let helper = &self.helpers.get_by_index(index).unwrap();
        let result =
            helper.0(context, self.reg(1), self.reg(2), self.reg(3), self.reg(4), self.reg(5));
        self.set_reg(0, result);
        Ok(())
    }

    #[inline(always)]
    fn exit<'a>(&mut self, _context: &mut Self::Context<'a>) -> Result<(), String> {
        self.result = Some(self.reg(0).as_u64());
        Ok(())
    }

    #[inline(always)]
    fn jump<'a>(&mut self, _context: &mut Self::Context<'a>, offset: i16) -> Result<(), String> {
        self.advance_pc(offset);
        Ok(())
    }

    #[inline(always)]
    fn jeq<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x == y))
    }
    #[inline(always)]
    fn jeq64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x == y)
    }
    #[inline(always)]
    fn jne<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x != y))
    }
    #[inline(always)]
    fn jne64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x != y)
    }
    #[inline(always)]
    fn jge<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x >= y))
    }
    #[inline(always)]
    fn jge64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x >= y)
    }
    #[inline(always)]
    fn jgt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x > y))
    }
    #[inline(always)]
    fn jgt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x > y)
    }
    #[inline(always)]
    fn jle<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x <= y))
    }
    #[inline(always)]
    fn jle64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x <= y)
    }
    #[inline(always)]
    fn jlt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x < y))
    }
    #[inline(always)]
    fn jlt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x < y)
    }
    #[inline(always)]
    fn jsge<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp32(x, y, |x, y| x >= y))
    }
    #[inline(always)]
    fn jsge64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp64(x, y, |x, y| x >= y))
    }
    #[inline(always)]
    fn jsgt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp32(x, y, |x, y| x > y))
    }
    #[inline(always)]
    fn jsgt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp64(x, y, |x, y| x > y))
    }
    #[inline(always)]
    fn jsle<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp32(x, y, |x, y| x <= y))
    }
    #[inline(always)]
    fn jsle64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp64(x, y, |x, y| x <= y))
    }
    #[inline(always)]
    fn jslt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp32(x, y, |x, y| x < y))
    }
    #[inline(always)]
    fn jslt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| scomp64(x, y, |x, y| x < y))
    }
    #[inline(always)]
    fn jset<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| comp32(x, y, |x, y| x & y != 0))
    }
    #[inline(always)]
    fn jset64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(dst, src, offset, |x, y| x & y != 0)
    }

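    // Atomic instructions are implemented with Rust atomics using sequentially consistent
    // ordering.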
    #[inline(always)]
    fn atomic_add<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.fetch_add(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_add64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.fetch_add(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_and<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.fetch_and(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_and64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.fetch_and(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_or<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.fetch_or(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_or64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.fetch_or(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_xor<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.fetch_xor(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_xor64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.fetch_xor(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_xchg<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(fetch, dst, offset, src, |_, a, v| a.swap(v, Ordering::SeqCst))
    }

    #[inline(always)]
    fn atomic_xchg64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(fetch, dst, offset, src, |_, a, v| a.swap(v, Ordering::SeqCst))
    }

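    /// Compare-and-exchange: `r0` holds the expected value and `src` the replacement. The value
    /// previously stored at `dst + offset` is written back to `r0`, whether or not the exchange
    /// succeeded.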
    #[inline(always)]
    fn atomic_cmpxchg<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(false, dst, offset, src, |this, a, v| {
            let r0 = this.reg(0).as_u32();
            let r0 = match a.compare_exchange(r0, v, Ordering::SeqCst, Ordering::SeqCst) {
                Ok(v) | Err(v) => v,
            };
            this.set_reg(0, r0.into());
            0
        })
    }

    #[inline(always)]
    fn atomic_cmpxchg64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation64(false, dst, offset, src, |this, a, v| {
            let r0 = this.reg(0).as_u64();
            let r0 = match a.compare_exchange(r0, v, Ordering::SeqCst, Ordering::SeqCst) {
                Ok(v) | Err(v) => v,
            };
            this.set_reg(0, r0.into());
            0
        })
    }

    #[inline(always)]
    fn load<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        let addr = self.reg(src);
        let loaded = self.load_memory(addr, offset as u64, width);
        self.set_reg(dst, loaded);
        Ok(())
    }

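    /// Implements `lddw`: the 64-bit immediate is split across two instructions, with the low
    /// 32 bits in this instruction and the high 32 bits in the `imm` field of the next one,
    /// which is then skipped.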
    #[inline(always)]
    fn load64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        _src: u8,
        lower: u32,
    ) -> Result<(), String> {
        let value = (lower as u64) | (((self.code[self.pc + 1].imm() as u32) as u64) << 32);
        self.set_reg(dst, value.into());
        self.advance_pc(1);
        Ok(())
    }

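    /// Implements the legacy packet load instructions. If the offset computation overflows or
    /// the access falls outside the packet, the program terminates and returns the current
    /// value of `r0`.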
    #[inline(always)]
    fn load_from_packet<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst_reg: Register,
        src_reg: Register,
        offset: i32,
        register_offset: Option<Register>,
        width: DataWidth,
    ) -> Result<(), String> {
        let Some(offset) =
            register_offset.map(|r| self.reg(r).as_i32()).unwrap_or(0).checked_add(offset as i32)
        else {
            self.result = Some(self.reg(0).as_u64());
            return Ok(());
        };
        let src_reg = self.reg(src_reg);
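        // The verifier only accepts this instruction when `src_reg` holds a packet pointer, so
        // it can be converted back into a packet here.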
        let packet = unsafe { C::Packet::from_bpf_value(context, src_reg) };
        if let Some(value) = packet.load(offset, width) {
            self.set_reg(dst_reg, value.into());
        } else {
            self.result = Some(self.reg(0).as_u64());
        }
        Ok(())
    }

    #[inline(always)]
    fn store<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Source,
        width: DataWidth,
    ) -> Result<(), String> {
        let src = self.compute_source(src);
        let dst = self.reg(dst);
        self.store_memory(dst, src, offset as u64, width);
        Ok(())
    }
}

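/// Applies a 32-bit operation to the low 32 bits of `x` and `y` and zero-extends the result to
/// 64 bits, as required by the 32-bit ALU instructions.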
#[inline(always)]
fn alu32(x: u64, y: u64, op: impl FnOnce(u32, u32) -> u32) -> u64 {
    op(x as u32, y as u32) as u64
}

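/// Applies an unsigned comparison to the low 32 bits of `x` and `y`.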
#[inline(always)]
fn comp32(x: u64, y: u64, op: impl FnOnce(u32, u32) -> bool) -> bool {
    op(x as u32, y as u32)
}

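/// Applies a signed comparison to `x` and `y` reinterpreted as `i64`.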
#[inline(always)]
fn scomp64(x: u64, y: u64, op: impl FnOnce(i64, i64) -> bool) -> bool {
    op(x as i64, y as i64)
}

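/// Applies a signed comparison to the low 32 bits of `x` and `y`.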
#[inline(always)]
fn scomp32(x: u64, y: u64, op: impl FnOnce(i32, i32) -> bool) -> bool {
    op(x as i32, y as i32)
}