#![allow(unused_variables)]

use crate::scalar_value::{ScalarValueData, U32Range, U32ScalarValueData, U64Range};
use crate::visitor::{BpfVisitor, ProgramCounter, Register, Source};
use crate::{
    BPF_MAX_INSTS, BPF_PSEUDO_MAP_IDX, BPF_PSEUDO_MAP_IDX_VALUE, BPF_STACK_SIZE, DataWidth,
    EbpfError, EbpfInstruction, GENERAL_REGISTER_COUNT, MapSchema, REGISTER_COUNT,
};
use byteorder::{BigEndian, ByteOrder, LittleEndian, NativeEndian};
use fuchsia_sync::Mutex;
use linux_uapi::bpf_map_type_BPF_MAP_TYPE_ARRAY;
use std::cmp::Ordering;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::sync::Arc;
use zerocopy::IntoBytes;

const U32_MAX: u64 = u32::MAX as u64;

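/// Sink for the verifier's diagnostic output.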
pub trait VerifierLogger {
    fn log(&mut self, line: &[u8]);
}

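/// A `VerifierLogger` that discards every line, only asserting in debug
/// builds that the output is ASCII.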
pub struct NullVerifierLogger;

impl VerifierLogger for NullVerifierLogger {
    fn log(&mut self, line: &[u8]) {
        debug_assert!(line.is_ascii());
    }
}

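/// Identifier for a memory buffer tracked by the verifier. An id can carry an
/// optional `parent`, recording the chain of pointer fields that was followed
/// to reach the buffer (see `prepended`).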
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct MemoryId {
    id: u64,
    parent: Option<Box<MemoryId>>,
}

impl MemoryId {
    pub fn id(&self) -> u64 {
        self.id
    }
}

impl From<u64> for MemoryId {
    fn from(id: u64) -> Self {
        Self { id, parent: None }
    }
}

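/// Counter backing `MemoryId::new()`. It starts at `u64::MAX / 2` so that ids
/// created here stay disjoint from the ids handed out by
/// `VerificationContext::next_id`, which counts up from 0.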
static BPF_TYPE_IDENTIFIER_COUNTER: std::sync::atomic::AtomicU64 =
    std::sync::atomic::AtomicU64::new(u64::MAX / 2);

impl MemoryId {
    pub fn new() -> MemoryId {
        Self {
            id: BPF_TYPE_IDENTIFIER_COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
            parent: None,
        }
    }

    fn prepended(&self, other: MemoryId) -> Self {
        match &self.parent {
            None => MemoryId { id: self.id, parent: Some(Box::new(other)) },
            Some(parent) => {
                MemoryId { id: self.id, parent: Some(Box::new(parent.prepended(other))) }
            }
        }
    }

    fn has_parent(&self, parent: &MemoryId) -> bool {
        match &self.parent {
            None => false,
            Some(p) => p.as_ref() == parent,
        }
    }

    fn matches(&self, other: &MemoryId) -> bool {
        if self.id != other.id {
            return false;
        }
        match (&self.parent, &other.parent) {
            (Some(p1), Some(p2)) => p1.matches(p2.as_ref()),
            _ => true,
        }
    }
}

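/// The type of a field in a `StructDescriptor`.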
#[derive(Clone, Debug, PartialEq)]
pub enum FieldType {
    /// A read-only scalar of `size` bytes.
    Scalar { size: usize },

    /// A scalar of `size` bytes that the program may also write.
    MutableScalar { size: usize },

    /// A pointer to a memory buffer of `buffer_size` bytes. `is_32_bit`
    /// indicates that the pointer is stored in a 32-bit field.
    PtrToMemory { is_32_bit: bool, id: MemoryId, buffer_size: usize },

    /// Same as `PtrToMemory`, except that the pointer may be null.
    NullablePtrToMemory { is_32_bit: bool, id: MemoryId, buffer_size: usize },

    /// A pointer to the start of an array whose end is given by the
    /// `PtrToEndArray` field with the same `id`.
    PtrToArray { is_32_bit: bool, id: MemoryId },

    /// A pointer to the end of the array identified by `id`.
    PtrToEndArray { is_32_bit: bool, id: MemoryId },
}

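/// Describes a single field of a struct passed to an eBPF program.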
#[derive(Clone, Debug, PartialEq)]
pub struct FieldDescriptor {
    pub offset: usize,

    pub field_type: FieldType,
}

impl FieldDescriptor {
    fn size(&self) -> usize {
        match self.field_type {
            FieldType::Scalar { size } | FieldType::MutableScalar { size } => size,
            FieldType::PtrToMemory { is_32_bit, .. }
            | FieldType::NullablePtrToMemory { is_32_bit, .. }
            | FieldType::PtrToArray { is_32_bit, .. }
            | FieldType::PtrToEndArray { is_32_bit, .. } => {
                if is_32_bit {
                    4
                } else {
                    8
                }
            }
        }
    }
}

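/// The field referenced by a single load or store: an instruction offset plus
/// an access width.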
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct Field {
    offset: i16,
    width: DataWidth,
}

impl Field {
    fn new(offset: i16, width: DataWidth) -> Self {
        Self { offset, width }
    }
}

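/// Describes the layout of a struct the program receives a pointer to.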
#[derive(Debug, PartialEq, Default)]
pub struct StructDescriptor {
    pub fields: Vec<FieldDescriptor>,
}

impl StructDescriptor {
    fn is_subtype(&self, super_struct: &StructDescriptor) -> bool {
        // `self` is a subtype if it contains every field of `super_struct`.
        super_struct.fields.iter().all(|super_field| self.fields.contains(super_field))
    }

    fn find_field(&self, base_offset: ScalarValueData, field: Field) -> Option<&FieldDescriptor> {
        let offset = base_offset + field.offset;
        let field_desc = self.fields.iter().find(|f| {
            f.offset <= offset.min() as usize && (offset.max() as usize) < f.offset + f.size()
        })?;
        let is_valid_load = match field_desc.field_type {
            FieldType::Scalar { size } | FieldType::MutableScalar { size } => {
                // The whole access must stay inside the field.
                ((offset + field.width.bytes()).max() as usize) <= field_desc.offset + size
            }
            FieldType::PtrToMemory { is_32_bit, .. }
            | FieldType::NullablePtrToMemory { is_32_bit, .. }
            | FieldType::PtrToArray { is_32_bit, .. }
            | FieldType::PtrToEndArray { is_32_bit, .. } => {
                // Pointer fields must be loaded whole, at the exact offset and
                // with the exact width.
                let expected_width = if is_32_bit { DataWidth::U32 } else { DataWidth::U64 };
                offset.is_known()
                    && offset.value as usize == field_desc.offset
                    && field.width == expected_width
            }
        };

        is_valid_load.then_some(field_desc)
    }
}

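/// How the size of a memory parameter to a helper function is determined.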
#[derive(Clone, Debug, PartialEq)]
pub enum MemoryParameterSize {
    /// The buffer has a fixed size.
    Value(u64),
    /// The buffer size is read from the argument at `index` (0-based;
    /// argument `i` is passed in register `i + 1`).
    Reference { index: u8 },
}

impl MemoryParameterSize {
    fn size(&self, context: &ComputationContext) -> Result<u64, String> {
        match self {
            Self::Value(size) => Ok(*size),
            Self::Reference { index } => {
                let size_type = context.reg(index + 1)?;
                match size_type {
                    Type::ScalarValue(data) if data.is_known() => Ok(data.value),
                    _ => Err("cannot know buffer size".to_string()),
                }
            }
        }
    }
}

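/// The type of a value as tracked by the verifier: either a concrete value
/// (scalar, pointer, ...) or a parameter/return-value template used in
/// `FunctionSignature`.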
#[derive(Clone, Debug, PartialEq)]
pub enum Type {
    /// A scalar value.
    ScalarValue(ScalarValueData),
    /// A pointer to the map with the given id and schema.
    ConstPtrToMap { id: u64, schema: MapSchema },
    /// A pointer into the stack at the given offset.
    PtrToStack { offset: StackOffset },
    /// A pointer into the memory buffer identified by `id`.
    PtrToMemory { id: MemoryId, offset: ScalarValueData, buffer_size: u64 },
    /// A pointer to a struct whose layout is given by `descriptor`.
    PtrToStruct { id: MemoryId, offset: ScalarValueData, descriptor: Arc<StructDescriptor> },
    /// A pointer into an array whose proven bound is tracked in
    /// `ComputationContext::array_bounds`.
    PtrToArray { id: MemoryId, offset: ScalarValueData },
    /// A pointer to the end of the array identified by `id`.
    PtrToEndArray { id: MemoryId },
    /// Either null or a value of the inner type.
    NullOr { id: MemoryId, inner: Box<Type> },
    /// A resource that must be released before the program exits.
    Releasable { id: MemoryId, inner: Box<Type> },
    /// Parameter template: any fully initialized scalar.
    ScalarValueParameter,
    /// Parameter template: a pointer to a map.
    ConstPtrToMapParameter,
    /// Parameter template: a pointer to a map key; the map is the argument at
    /// `map_ptr_index`.
    MapKeyParameter { map_ptr_index: u8 },
    /// Parameter template: a pointer to a map value; the map is the argument
    /// at `map_ptr_index`.
    MapValueParameter { map_ptr_index: u8 },
    /// Parameter template: a memory buffer with the given size, read and/or
    /// written by the helper.
    MemoryParameter { size: MemoryParameterSize, input: bool, output: bool },
    /// Return-value template: the value is the same as the argument at
    /// `parameter_index`.
    AliasParameter { parameter_index: u8 },
    /// Template: either null or a value matching the inner template.
    NullOrParameter(Box<Type>),
    /// Parameter template: a pointer to the struct identified by `id`.
    StructParameter { id: MemoryId },
    /// Return-value template: a releasable resource wrapping the inner type.
    ReleasableParameter { id: MemoryId, inner: Box<Type> },
    /// Parameter template: releases the resource whose id derives from `id`.
    ReleaseParameter { id: MemoryId },
}

/// Partial order on types where a "greater" type is strictly more general:
/// `t1 >= t2` means any program valid when a value has type `t1` is also
/// valid when it has type `t2`. `UNINITIALIZED` is the most general type;
/// scalars compare through `ScalarValueData`.
impl PartialOrd for Type {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        if self == other {
            return Some(Ordering::Equal);
        }

        if self == &Type::UNINITIALIZED {
            return Some(Ordering::Greater);
        }
        if other == &Type::UNINITIALIZED {
            return Some(Ordering::Less);
        }

        match (self, other) {
            (Self::ScalarValue(data1), Self::ScalarValue(data2)) => data1.partial_cmp(data2),
            _ => None,
        }
    }
}

impl From<ScalarValueData> for Type {
    fn from(value: ScalarValueData) -> Self {
        Self::ScalarValue(value)
    }
}

impl From<u64> for Type {
    fn from(value: u64) -> Self {
        Self::ScalarValue(value.into())
    }
}

impl Default for Type {
    fn default() -> Self {
        Self::UNINITIALIZED.clone()
    }
}

impl Type {
    pub const UNINITIALIZED: Self = Self::ScalarValue(ScalarValueData::UNINITIALIZED);

    pub const UNKNOWN_SCALAR: Self = Self::ScalarValue(ScalarValueData::UNKNOWN_WRITTEN);

    fn mask(width: DataWidth) -> u64 {
        if width == DataWidth::U64 { u64::MAX } else { (1 << width.bits()) - 1 }
    }

    fn is_written_scalar(&self) -> bool {
        match self {
            Self::ScalarValue(data) if data.is_fully_initialized() => true,
            _ => false,
        }
    }

    pub fn is_subtype(&self, super_type: &Type) -> bool {
        match (self, super_type) {
            (_, Self::ScalarValue(data)) if data.is_uninitialized() => true,

            (
                Self::PtrToStruct { id: id1, offset: offset1, descriptor: descriptor1 },
                Self::PtrToStruct { id: id2, offset: offset2, descriptor: descriptor2 },
            ) => id1 == id2 && offset1 <= offset2 && descriptor1.is_subtype(descriptor2),

            (self_type, super_type) if self_type == super_type => true,

            _ => false,
        }
    }

    pub fn is_non_zero(&self) -> bool {
        match self {
            Self::ScalarValue(d) => d.min() > 0,
            Self::NullOr { .. } => false,
            _ => true,
        }
    }

    fn inner(&self, context: &ComputationContext) -> Result<&Type, String> {
        match self {
            Self::Releasable { id, inner } => {
                if context.resources.contains(id) {
                    Ok(&inner)
                } else {
                    Err("Access to released resource".to_string())
                }
            }
            _ => Ok(self),
        }
    }

    /// Constrains the types of the two operands of a conditional jump, for
    /// the branch where the condition `jump_type` is known to hold.
    fn constraint(
        context: &mut ComputationContext,
        jump_type: JumpType,
        jump_width: JumpWidth,
        type1: Self,
        type2: Self,
    ) -> Result<(Self, Self), String> {
        let result = match (jump_width, jump_type, type1.inner(context)?, type2.inner(context)?) {
            (JumpWidth::W64, JumpType::Eq, Type::ScalarValue(data1), Type::ScalarValue(data2))
                if data1.is_fully_initialized() && data2.is_fully_initialized() =>
            {
                let umin = std::cmp::max(data1.min(), data2.min());
                let umax = std::cmp::min(data1.max(), data2.max());
                let v = Type::ScalarValue(ScalarValueData::new(
                    data1.value | data2.value,
                    data1.unknown_mask & data2.unknown_mask,
                    0,
                    U64Range::new(umin, umax),
                ));
                (v.clone(), v)
            }
            (JumpWidth::W32, JumpType::Eq, Type::ScalarValue(data1), Type::ScalarValue(data2))
                if data1.is_fully_initialized() && data2.is_fully_initialized() =>
            {
                // Only the low 32 bits are constrained; the ranges can be
                // narrowed only when both operands fit in 32 bits.
                let maybe_umin = if data1.min() <= U32_MAX && data2.min() <= U32_MAX {
                    Some(std::cmp::max(data1.min(), data2.min()))
                } else {
                    None
                };
                let maybe_umax = if data1.max() <= U32_MAX && data2.max() <= U32_MAX {
                    Some(std::cmp::min(data1.max(), data2.max()))
                } else {
                    None
                };

                let urange1 = U64Range::new(
                    maybe_umin.unwrap_or_else(|| data1.min()),
                    maybe_umax.unwrap_or_else(|| data1.max()),
                );
                let v1 = Type::ScalarValue(ScalarValueData::new(
                    data1.value | (data2.value & U32_MAX),
                    data1.unknown_mask & (data2.unknown_mask | (U32_MAX << 32)),
                    0,
                    urange1,
                ));

                let urange2 = U64Range::new(
                    maybe_umin.unwrap_or_else(|| data2.min()),
                    maybe_umax.unwrap_or_else(|| data2.max()),
                );
                let v2 = Type::ScalarValue(ScalarValueData::new(
                    data2.value | (data1.value & U32_MAX),
                    data2.unknown_mask & (data1.unknown_mask | (U32_MAX << 32)),
                    0,
                    urange2,
                ));
                (v1, v2)
            }
            (JumpWidth::W64, JumpType::Eq, Type::ScalarValue(data), Type::NullOr { id, .. })
            | (JumpWidth::W64, JumpType::Eq, Type::NullOr { id, .. }, Type::ScalarValue(data))
                if data.is_zero() =>
            {
                context.set_null(id, true);
                let zero = Type::from(0);
                (zero.clone(), zero)
            }
            (JumpWidth::W64, jump_type, Type::NullOr { id, inner }, Type::ScalarValue(data))
                if jump_type.is_strict() && data.is_zero() =>
            {
                context.set_null(id, false);
                let inner = *inner.clone();
                inner.register_resource(context);
                (inner, type2)
            }
            (JumpWidth::W64, jump_type, Type::ScalarValue(data), Type::NullOr { id, inner })
                if jump_type.is_strict() && data.is_zero() =>
            {
                context.set_null(id, false);
                let inner = *inner.clone();
                inner.register_resource(context);
                (type1, inner)
            }

            (JumpWidth::W64, JumpType::Lt, Type::ScalarValue(data1), Type::ScalarValue(data2))
            | (JumpWidth::W64, JumpType::Gt, Type::ScalarValue(data2), Type::ScalarValue(data1)) => {
                debug_assert!(data1.min() < u64::MAX);
                debug_assert!(data2.max() > 0);
                let new_max1 = std::cmp::min(data1.max(), data2.max() - 1);
                debug_assert!(data1.min() <= new_max1);
                let new_min2 = std::cmp::max(data2.min(), data1.min() + 1);
                debug_assert!(new_min2 <= data2.max());
                let new_range1 = U64Range::new(data1.min(), new_max1);
                let new_range2 = U64Range::new(new_min2, data2.max());
                (data1.update_range(new_range1).into(), data2.update_range(new_range2).into())
            }

            (JumpWidth::W64, JumpType::Le, Type::ScalarValue(data1), Type::ScalarValue(data2))
            | (JumpWidth::W64, JumpType::Ge, Type::ScalarValue(data2), Type::ScalarValue(data1)) => {
                let new_max1 = std::cmp::min(data1.max(), data2.max());
                debug_assert!(data1.min() <= new_max1);
                let new_min2 = std::cmp::max(data2.min(), data1.min());
                debug_assert!(new_min2 <= data2.max());
                let new_range1 = U64Range::new(data1.min(), new_max1);
                let new_range2 = U64Range::new(new_min2, data2.max());
                (data1.update_range(new_range1).into(), data2.update_range(new_range2).into())
            }

            (
                JumpWidth::W64,
                JumpType::Eq,
                Type::PtrToArray { id: id1, offset },
                Type::PtrToEndArray { id: id2 },
            )
            | (
                JumpWidth::W64,
                JumpType::Le,
                Type::PtrToArray { id: id1, offset },
                Type::PtrToEndArray { id: id2 },
            )
            | (
                JumpWidth::W64,
                JumpType::Ge,
                Type::PtrToEndArray { id: id1 },
                Type::PtrToArray { id: id2, offset },
            ) if id1 == id2 => {
                context.update_array_bounds(id1.clone(), *offset);
                (type1, type2)
            }
            (
                JumpWidth::W64,
                JumpType::Lt,
                Type::PtrToArray { id: id1, offset },
                Type::PtrToEndArray { id: id2 },
            )
            | (
                JumpWidth::W64,
                JumpType::Gt,
                Type::PtrToEndArray { id: id1 },
                Type::PtrToArray { id: id2, offset },
            ) if id1 == id2 => {
                context.update_array_bounds(id1.clone(), *offset + 1);
                (type1, type2)
            }
            (JumpWidth::W64, JumpType::Eq, _, _) => (type1.clone(), type1),
            _ => (type1, type2),
        };
        Ok(result)
    }

    /// Checks that `self` is acceptable for a parameter declared as
    /// `parameter_type` (argument `index`), applying any side effects (stack
    /// writes, resource release) to `next`.
    fn match_parameter_type(
        &self,
        context: &ComputationContext,
        parameter_type: &Type,
        index: usize,
        next: &mut ComputationContext,
    ) -> Result<(), String> {
        match (parameter_type, self) {
            (Type::NullOrParameter(t), Type::ScalarValue(data))
                if data.is_known() && data.value == 0 =>
            {
                Ok(())
            }
            (Type::NullOrParameter(t), _) => self.match_parameter_type(context, t, index, next),
            (Type::ScalarValueParameter, Type::ScalarValue(data))
                if data.is_fully_initialized() =>
            {
                Ok(())
            }
            (Type::ConstPtrToMapParameter, Type::ConstPtrToMap { .. }) => Ok(()),
            (
                Type::MapKeyParameter { map_ptr_index },
                Type::PtrToMemory { offset, buffer_size, .. },
            ) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.check_memory_access(*offset, *buffer_size, 0, schema.key_size as usize)
            }
            (Type::MapKeyParameter { map_ptr_index }, Type::PtrToStack { offset }) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.stack.read_data_ptr(context.pc, *offset, schema.key_size as u64)
            }
            (
                Type::MapValueParameter { map_ptr_index },
                Type::PtrToMemory { offset, buffer_size, .. },
            ) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.check_memory_access(*offset, *buffer_size, 0, schema.value_size as usize)
            }
            (Type::MapValueParameter { map_ptr_index }, Type::PtrToStack { offset }) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.stack.read_data_ptr(context.pc, *offset, schema.value_size as u64)
            }
            (Type::MemoryParameter { size, .. }, Type::PtrToMemory { offset, buffer_size, .. }) => {
                let expected_size = size.size(context)?;
                let size_left = ScalarValueData::from(*buffer_size) - offset;
                if expected_size > size_left.min() {
                    return Err("out of bound read".to_string());
                }
                Ok(())
            }

            (Type::MemoryParameter { size, input, output }, Type::PtrToStack { offset }) => {
                let size = size.size(context)?;
                let buffer_end = offset.add(size);
                if !buffer_end.is_valid() {
                    Err("out of bound access".to_string())
                } else {
                    if *output {
                        next.stack.write_data_ptr(context.pc, *offset, size)?;
                    }
                    if *input {
                        context.stack.read_data_ptr(context.pc, *offset, size)?;
                    }
                    Ok(())
                }
            }
            (
                Type::StructParameter { id: id1 },
                Type::PtrToMemory { id: id2, offset, .. }
                | Type::PtrToStruct { id: id2, offset, .. },
            ) if offset.is_zero() && id1.matches(id2) => Ok(()),
            (
                Type::ReleasableParameter { id: id1, inner: inner1 },
                Type::Releasable { id: id2, inner: inner2 },
            ) if id2.has_parent(id1) => {
                if next.resources.contains(id2) {
                    inner2.match_parameter_type(context, inner1, index, next)
                } else {
                    Err(format!("Resource already released for index {index}"))
                }
            }
            (Type::ReleaseParameter { id: id1 }, Type::Releasable { id: id2, .. })
                if id2.has_parent(id1) =>
            {
                if next.resources.remove(id2) {
                    Ok(())
                } else {
                    Err(format!("Resource {id2:?} already released for index {index}"))
                }
            }
            (_, Type::Releasable { inner, .. }) => {
                inner.match_parameter_type(context, parameter_type, index, next)
            }

            _ => Err(format!("incorrect parameter for index {index}")),
        }
    }

    fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
        match self {
            Type::NullOr { id, inner } if id == null_id => {
                if is_null {
                    *self = Type::from(0);
                } else {
                    *self = *inner.clone();
                }
            }
            _ => {}
        }
    }

    fn register_resource(&self, context: &mut ComputationContext) {
        match self {
            Type::Releasable { id, .. } => {
                context.resources.insert(id.clone());
            }
            _ => {}
        }
    }

    /// Compares two lists of types element-wise, combining the per-element
    /// orderings; returns `None` as soon as two elements are incomparable or
    /// the combined ordering is contradictory.
    fn compare_list<'a>(
        mut l1: impl Iterator<Item = &'a Self>,
        mut l2: impl Iterator<Item = &'a Self>,
    ) -> Option<Ordering> {
        let mut result = Ordering::Equal;
        loop {
            match (l1.next(), l2.next()) {
                (None, None) => return Some(result),
                (_, None) | (None, _) => return None,
                (Some(v1), Some(v2)) => {
                    result = associate_orderings(result, v1.partial_cmp(v2)?)?;
                }
            }
        }
    }
}

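/// The signature of an eBPF helper function, expressed with parameter and
/// return-value templates from `Type`.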
#[derive(Clone, Debug)]
pub struct FunctionSignature {
    pub args: Vec<Type>,
    pub return_value: Type,
    pub invalidate_array_bounds: bool,
}

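/// Everything the verifier needs to know about the environment the program
/// will run in: registered maps, available helpers, argument types, and the
/// packet type, if any.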
#[derive(Debug, Default)]
pub struct CallingContext {
    pub maps: Vec<MapSchema>,
    pub helpers: HashMap<u32, FunctionSignature>,
    pub args: Vec<Type>,
    pub packet_type: Option<Type>,
}

impl CallingContext {
    pub fn register_map(&mut self, schema: MapSchema) -> usize {
        let index = self.maps.len();
        self.maps.push(schema);
        index
    }
    pub fn set_helpers(&mut self, helpers: HashMap<u32, FunctionSignature>) {
        self.helpers = helpers;
    }
    pub fn set_args(&mut self, args: &[Type]) {
        debug_assert!(args.len() <= 5);
        self.args = args.to_vec();
    }
    pub fn set_packet_type(&mut self, packet_type: Type) {
        self.packet_type = Some(packet_type);
    }
}

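/// Records a load or store, at a given pc, into a field of a struct passed to
/// the program, so that later stages can rewrite the access if needed.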
#[derive(Debug, PartialEq, Clone)]
pub struct StructAccess {
    pub pc: ProgramCounter,

    pub memory_id: MemoryId,

    pub field_offset: usize,

    pub is_32_bit_ptr_load: bool,
}

#[derive(Debug)]
pub struct VerifiedEbpfProgram {
    pub(crate) code: Vec<EbpfInstruction>,
    pub(crate) args: Vec<Type>,
    pub(crate) struct_access_instructions: Vec<StructAccess>,
    pub(crate) maps: Vec<MapSchema>,
}

impl VerifiedEbpfProgram {
    pub fn to_code(self) -> Vec<EbpfInstruction> {
        debug_assert!(self.struct_access_instructions.is_empty());
        debug_assert!(self.maps.is_empty());
        self.code
    }

    pub fn code(&self) -> &[EbpfInstruction] {
        &self.code
    }

    pub fn struct_access_instructions(&self) -> &[StructAccess] {
        &self.struct_access_instructions
    }

    pub fn from_verified_code(
        code: Vec<EbpfInstruction>,
        args: Vec<Type>,
        struct_access_instructions: Vec<StructAccess>,
        maps: Vec<MapSchema>,
    ) -> Self {
        Self { code, args, struct_access_instructions, maps }
    }
}

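/// Verifies the given eBPF program: starting from the program's arguments in
/// r1 to r5, every reachable state is checked for memory safety, and the
/// exploration is bounded so that non-terminating programs are rejected.
///
/// A minimal sketch of a call site (the instruction buffer `code` and the
/// argument types are hypothetical):
///
/// ```ignore
/// let mut calling_context = CallingContext::default();
/// calling_context.set_args(&[Type::UNKNOWN_SCALAR]);
/// let verified = verify_program(code, calling_context, &mut NullVerifierLogger)?;
/// ```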
pub fn verify_program(
    code: Vec<EbpfInstruction>,
    calling_context: CallingContext,
    logger: &mut dyn VerifierLogger,
) -> Result<VerifiedEbpfProgram, EbpfError> {
    if code.len() > BPF_MAX_INSTS {
        return error_and_log(logger, "ebpf program too long");
    }

    let mut context = ComputationContext::default();
    for (i, t) in calling_context.args.iter().enumerate() {
        // Arguments are passed in registers r1 to r5.
        context.set_reg((i + 1) as u8, t.clone()).map_err(EbpfError::ProgramVerifyError)?;
    }
    let states = vec![context];
    let mut verification_context = VerificationContext {
        calling_context,
        logger,
        states,
        code: &code,
        counter: 0,
        iteration: 0,
        terminating_contexts: Default::default(),
        struct_access_instructions: Default::default(),
    };
    while let Some(mut context) = verification_context.states.pop() {
        if let Some(terminating_contexts) =
            verification_context.terminating_contexts.get(&context.pc)
        {
            // If a state at least as general as this one has already been
            // proven to terminate, this state does not need to be verified
            // again.
            if let Some(ending_context) =
                terminating_contexts.iter().find(|c| c.computation_context >= context)
            {
                if let Some(parent) = context.parent.take() {
                    parent.dependencies.lock().push(ending_context.dependencies.clone());
                    if let Some(parent) = Arc::into_inner(parent) {
                        parent
                            .terminate(&mut verification_context)
                            .map_err(EbpfError::ProgramVerifyError)?;
                    }
                }
                continue;
            }
        }
        if verification_context.iteration > 10 * BPF_MAX_INSTS {
            return error_and_log(verification_context.logger, "bpf byte code does not terminate");
        }
        if context.pc >= code.len() {
            return error_and_log(verification_context.logger, "pc out of bounds");
        }
        if let Err(message) = context.visit(&mut verification_context, code[context.pc]) {
            let message = format!("at PC {}: {}", context.pc, message);
            return error_and_log(verification_context.logger, message);
        }
        verification_context.iteration += 1;
    }

    let struct_access_instructions =
        verification_context.struct_access_instructions.into_values().collect::<Vec<_>>();
    let CallingContext { maps, args, .. } = verification_context.calling_context;
    Ok(VerifiedEbpfProgram { code, struct_access_instructions, maps, args })
}

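/// Mutable state shared across the whole verification of one program.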
struct VerificationContext<'a> {
    /// The environment the program is verified against.
    calling_context: CallingContext,
    /// Sink for diagnostics.
    logger: &'a mut dyn VerifierLogger,
    /// The states that remain to be explored.
    states: Vec<ComputationContext>,
    /// The program being verified.
    code: &'a [EbpfInstruction],
    /// Counter used to generate unique memory ids.
    counter: u64,
    /// Number of states visited so far, used to bound verification time.
    iteration: usize,
    /// For each pc, the states that were proven to terminate from there.
    terminating_contexts: BTreeMap<ProgramCounter, Vec<TerminatingContext>>,
    /// The struct field accesses found in the program, indexed by pc.
    struct_access_instructions: HashMap<ProgramCounter, StructAccess>,
}

impl<'a> VerificationContext<'a> {
    fn next_id(&mut self) -> u64 {
        let id = self.counter;
        self.counter += 1;
        id
    }

    /// Records a struct field access at `struct_access.pc`. All execution
    /// paths must agree on how a given instruction accesses struct fields.
    fn register_struct_access(&mut self, struct_access: StructAccess) -> Result<(), String> {
        match self.struct_access_instructions.entry(struct_access.pc) {
            std::collections::hash_map::Entry::Vacant(entry) => {
                entry.insert(struct_access);
            }
            std::collections::hash_map::Entry::Occupied(entry) => {
                if *entry.get() != struct_access {
                    return Err("Inconsistent struct field access".to_string());
                }
            }
        }
        Ok(())
    }
}

const STACK_ELEMENT_SIZE: usize = std::mem::size_of::<u64>();
const STACK_MAX_INDEX: usize = BPF_STACK_SIZE / STACK_ELEMENT_SIZE;

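/// An offset into the stack, in bytes from its lowest address. The default
/// value, `BPF_STACK_SIZE`, is the initial value of `r10`; the stack grows
/// down from there.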
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct StackOffset(ScalarValueData);

impl Default for StackOffset {
    fn default() -> Self {
        Self(BPF_STACK_SIZE.into())
    }
}

impl StackOffset {
    fn is_valid(&self) -> bool {
        self.0.is_known() && self.0.value <= (BPF_STACK_SIZE as u64)
    }

    fn reg(&self) -> ScalarValueData {
        self.0
    }

    fn array_index(&self) -> usize {
        debug_assert!(self.is_valid());
        usize::try_from(self.0.value).unwrap() / STACK_ELEMENT_SIZE
    }

    fn sub_index(&self) -> usize {
        debug_assert!(self.is_valid());
        usize::try_from(self.0.value).unwrap() % STACK_ELEMENT_SIZE
    }

    fn add<T: Into<ScalarValueData>>(self, rhs: T) -> Self {
        Self(self.0 + rhs)
    }
}

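/// The state of the stack, stored sparsely as a map from 64-bit slot index to
/// the type stored in that slot; missing slots are uninitialized.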
#[derive(Clone, Debug, Default, PartialEq)]
struct Stack {
    // Keyed by slot index, sorted so that `partial_cmp` can merge the two key
    // sequences in order.
    data: BTreeMap<usize, Type>,
}

impl Stack {
    fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
        for (_, t) in self.data.iter_mut() {
            t.set_null(null_id, is_null);
        }
    }

    fn get(&self, index: usize) -> &Type {
        self.data.get(&index).unwrap_or(&Type::UNINITIALIZED)
    }

    fn set(&mut self, index: usize, t: Type) {
        if t == Type::UNINITIALIZED {
            self.data.remove(&index);
        } else {
            self.data.insert(index, t);
        }
    }

    fn extract_sub_value(value: u64, offset: usize, byte_count: usize) -> u64 {
        NativeEndian::read_uint(&value.as_bytes()[offset..], byte_count)
    }

    fn insert_sub_value(mut original: u64, value: u64, width: DataWidth, offset: usize) -> u64 {
        let byte_count = width.bytes();
        let original_buf = original.as_mut_bytes();
        let value_buf = value.as_bytes();
        original_buf[offset..(byte_count + offset)].copy_from_slice(&value_buf[..byte_count]);
        original
    }

    fn write_data_ptr(
        &mut self,
        pc: ProgramCounter,
        mut offset: StackOffset,
        bytes: u64,
    ) -> Result<(), String> {
        for _ in 0..bytes {
            self.store(offset, Type::UNKNOWN_SCALAR, DataWidth::U8)?;
            offset = offset.add(1);
        }
        Ok(())
    }

    fn read_data_ptr(
        &self,
        pc: ProgramCounter,
        offset: StackOffset,
        bytes: u64,
    ) -> Result<(), String> {
        let read_element =
            |index: usize, start_offset: usize, end_offset: usize| -> Result<(), String> {
                match self.get(index) {
                    Type::ScalarValue(data) => {
                        debug_assert!(end_offset > start_offset);
                        let unwritten_bits = Self::extract_sub_value(
                            data.unwritten_mask,
                            start_offset,
                            end_offset - start_offset,
                        );
                        if unwritten_bits == 0 {
                            Ok(())
                        } else {
                            Err("reading unwritten value from the stack".to_string())
                        }
                    }
                    _ => Err("invalid read from the stack".to_string()),
                }
            };
        if bytes == 0 {
            return Ok(());
        }

        if bytes as usize > BPF_STACK_SIZE {
            return Err("stack overflow".to_string());
        }

        if !offset.is_valid() {
            return Err("invalid stack offset".to_string());
        }

        let end_offset = offset.add(bytes);
        if !end_offset.is_valid() {
            return Err("stack overflow".to_string());
        }

        // The whole read fits inside a single stack slot.
        if offset.array_index() == end_offset.array_index() {
            return read_element(offset.array_index(), offset.sub_index(), end_offset.sub_index());
        }

        read_element(offset.array_index(), offset.sub_index(), STACK_ELEMENT_SIZE)?;

        if end_offset.sub_index() != 0 {
            read_element(end_offset.array_index(), 0, end_offset.sub_index())?;
        }

        for i in (offset.array_index() + 1)..end_offset.array_index() {
            read_element(i, 0, STACK_ELEMENT_SIZE)?;
        }

        Ok(())
    }

    fn store(&mut self, offset: StackOffset, value: Type, width: DataWidth) -> Result<(), String> {
        if !offset.is_valid() {
            return Err("out of bounds store".to_string());
        }
        if offset.sub_index() % width.bytes() != 0 {
            return Err("misaligned access".to_string());
        }

        let index = offset.array_index();
        if width == DataWidth::U64 {
            self.set(index, value);
        } else {
            match value {
                Type::ScalarValue(data) => {
                    let old_data = match self.get(index) {
                        Type::ScalarValue(data) => *data,
                        // Overwriting part of a non-scalar value invalidates
                        // the whole slot.
                        _ => ScalarValueData::UNINITIALIZED,
                    };
                    let sub_index = offset.sub_index();
                    let value =
                        Self::insert_sub_value(old_data.value, data.value, width, sub_index);
                    let unknown_mask = Self::insert_sub_value(
                        old_data.unknown_mask,
                        data.unknown_mask,
                        width,
                        sub_index,
                    );
                    let unwritten_mask = Self::insert_sub_value(
                        old_data.unwritten_mask,
                        data.unwritten_mask,
                        width,
                        sub_index,
                    );
                    let urange = U64Range::compute_range_for_bytes_swap(
                        old_data.urange,
                        data.urange,
                        sub_index,
                        0,
                        width.bytes(),
                    );
                    self.set(
                        index,
                        Type::ScalarValue(ScalarValueData::new(
                            value,
                            unknown_mask,
                            unwritten_mask,
                            urange,
                        )),
                    );
                }
                _ => {
                    return Err("cannot store part of a non scalar value on the stack".to_string());
                }
            }
        }
        Ok(())
    }

    fn load(&self, offset: StackOffset, width: DataWidth) -> Result<Type, String> {
        if offset.array_index() >= STACK_MAX_INDEX {
            return Err("out of bounds load".to_string());
        }
        if offset.sub_index() % width.bytes() != 0 {
            return Err("misaligned access".to_string());
        }

        let index = offset.array_index();
        let loaded_type = self.get(index).clone();
        if width == DataWidth::U64 {
            Ok(loaded_type)
        } else {
            match loaded_type {
                Type::ScalarValue(data) => {
                    let sub_index = offset.sub_index();
                    let value = Self::extract_sub_value(data.value, sub_index, width.bytes());
                    let unknown_mask =
                        Self::extract_sub_value(data.unknown_mask, sub_index, width.bytes());
                    let unwritten_mask =
                        Self::extract_sub_value(data.unwritten_mask, sub_index, width.bytes());
                    let urange = U64Range::compute_range_for_bytes_swap(
                        0.into(),
                        data.urange,
                        0,
                        sub_index,
                        width.bytes(),
                    );
                    Ok(Type::ScalarValue(ScalarValueData::new(
                        value,
                        unknown_mask,
                        unwritten_mask,
                        urange,
                    )))
                }
                _ => Err(format!("incorrect load of {} bytes", width.bytes())),
            }
        }
    }
}

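/// Compares two stacks slot by slot; a missing slot is uninitialized and
/// therefore more general than any written one.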
impl PartialOrd for Stack {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        let mut result = Ordering::Equal;
        let mut data_iter1 = self.data.iter().peekable();
        let mut data_iter2 = other.data.iter().peekable();
        loop {
            let k1 = data_iter1.peek().map(|(k, _)| *k);
            let k2 = data_iter2.peek().map(|(k, _)| *k);
            let k = match (k1, k2) {
                (None, None) => return Some(result),
                (Some(k), None) => {
                    data_iter1.next();
                    *k
                }
                (None, Some(k)) => {
                    data_iter2.next();
                    *k
                }
                (Some(k1), Some(k2)) => {
                    if k1 <= k2 {
                        data_iter1.next();
                    }
                    if k2 <= k1 {
                        data_iter2.next();
                    }
                    *std::cmp::min(k1, k2)
                }
            };
            result = associate_orderings(result, self.get(k).partial_cmp(other.get(k))?)?;
        }
    }
}

macro_rules! bpf_log {
    ($context:ident, $verification_context:ident, $($msg:tt)*) => {
        let prefix = format!(
            "{}: ({:02x})",
            $context.pc,
            $verification_context.code[$context.pc].code()
        );
        let suffix = format!($($msg)*);
        $verification_context.logger.log(format!("{prefix} {suffix}").as_bytes());
    }
}

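/// The state of the abstract machine at a given point in the program: the pc,
/// the types of the registers and of the stack, the proven array bounds, and
/// the live resources.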
#[derive(Debug, Default)]
struct ComputationContext {
    /// The pc of the next instruction to interpret.
    pc: ProgramCounter,
    /// The types of the general registers r0 to r9.
    registers: [Type; GENERAL_REGISTER_COUNT as usize],
    /// The state of the stack.
    stack: Stack,
    /// The proven bound for each array the context has a pointer into.
    array_bounds: BTreeMap<MemoryId, u64>,
    /// The resources that have been acquired and not yet released.
    resources: HashSet<MemoryId>,
    /// The context this one was forked from, if any.
    parent: Option<Arc<ComputationContext>>,
    /// The data dependencies reported by the contexts forked from this one.
    dependencies: Mutex<Vec<DataDependencies>>,
}

impl Clone for ComputationContext {
    fn clone(&self) -> Self {
        Self {
            pc: self.pc,
            registers: self.registers.clone(),
            stack: self.stack.clone(),
            array_bounds: self.array_bounds.clone(),
            resources: self.resources.clone(),
            parent: self.parent.clone(),
            // Dependencies are per-instance and deliberately not cloned.
            dependencies: Default::default(),
        }
    }
}

impl PartialEq for ComputationContext {
    fn eq(&self, other: &Self) -> bool {
        self.pc == other.pc
            && self.registers == other.registers
            && self.stack == other.stack
            && self.array_bounds == other.array_bounds
    }
}

impl ComputationContext {
    fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
        for i in 0..self.registers.len() {
            self.registers[i].set_null(null_id, is_null);
        }
        self.stack.set_null(null_id, is_null);
    }

    fn reg(&self, index: Register) -> Result<Type, String> {
        if index >= REGISTER_COUNT {
            return Err(format!("R{index} is invalid"));
        }
        if index < GENERAL_REGISTER_COUNT {
            Ok(self.registers[index as usize].clone())
        } else {
            Ok(Type::PtrToStack { offset: StackOffset::default() })
        }
    }

    fn set_reg(&mut self, index: Register, reg_type: Type) -> Result<(), String> {
        if index >= GENERAL_REGISTER_COUNT {
            return Err(format!("R{index} is invalid"));
        }
        self.registers[index as usize] = reg_type;
        Ok(())
    }

    fn update_array_bounds(&mut self, id: MemoryId, new_bound: ScalarValueData) {
        let new_bound_min = new_bound.min();
        self.array_bounds
            .entry(id)
            .and_modify(|v| *v = std::cmp::max(*v, new_bound_min))
            .or_insert(new_bound_min);
    }

    fn get_map_schema(&self, argument: u8) -> Result<MapSchema, String> {
        match self.reg(argument + 1)? {
            Type::ConstPtrToMap { schema, .. } => Ok(schema),
            _ => Err(format!("No map found at argument {argument}")),
        }
    }

    fn next(&self) -> Result<Self, String> {
        let parent = Some(Arc::new(self.clone()));
        self.jump_with_offset(0, parent)
    }

    fn jump_with_offset(&self, offset: i16, parent: Option<Arc<Self>>) -> Result<Self, String> {
        let pc = self
            .pc
            .checked_add_signed(offset.into())
            .and_then(|v| v.checked_add_signed(1))
            .ok_or_else(|| "jump outside of program".to_string())?;
        let result = Self {
            pc,
            registers: self.registers.clone(),
            stack: self.stack.clone(),
            array_bounds: self.array_bounds.clone(),
            resources: self.resources.clone(),
            parent,
            dependencies: Default::default(),
        };
        Ok(result)
    }

    fn check_memory_access(
        &self,
        dst_offset: ScalarValueData,
        dst_buffer_size: u64,
        instruction_offset: i16,
        width: usize,
    ) -> Result<(), String> {
        let memory_range = dst_offset.urange + instruction_offset + U64Range::new(0, width as u64);
        if memory_range.max > dst_buffer_size {
            return Err("out of bound access".to_string());
        }
        Ok(())
    }

    fn store_memory(
        &mut self,
        context: &mut VerificationContext<'_>,
        addr: &Type,
        field: Field,
        value: Type,
    ) -> Result<(), String> {
        let addr = addr.inner(self)?;
        match *addr {
            Type::PtrToStack { offset } => {
                let offset_sum = offset.add(field.offset);
                return self.stack.store(offset_sum, value, field.width);
            }
            Type::PtrToMemory { offset, buffer_size, .. } => {
                self.check_memory_access(offset, buffer_size, field.offset, field.width.bytes())?;
            }
            Type::PtrToStruct { ref id, offset, ref descriptor, .. } => {
                let field_desc = descriptor
                    .find_field(offset, field)
                    .ok_or_else(|| "incorrect store".to_string())?;

                if !matches!(field_desc.field_type, FieldType::MutableScalar { .. }) {
                    return Err("store to a read-only field".to_string());
                }

                context.register_struct_access(StructAccess {
                    pc: self.pc,
                    memory_id: id.clone(),
                    field_offset: field_desc.offset,
                    is_32_bit_ptr_load: false,
                })?;
            }
            Type::PtrToArray { ref id, offset } => {
                self.check_memory_access(
                    offset,
                    *self.array_bounds.get(&id).unwrap_or(&0),
                    field.offset,
                    field.width.bytes(),
                )?;
            }
            _ => return Err("incorrect store".to_string()),
        }

        // Only fully initialized scalars may be written to non-stack memory.
        match value {
            Type::ScalarValue(data) if data.is_fully_initialized() => {}
            _ => return Err("incorrect store".to_string()),
        }
        Ok(())
    }

    fn load_memory(
        &self,
        context: &mut VerificationContext<'_>,
        addr: &Type,
        field: Field,
    ) -> Result<Type, String> {
        let addr = addr.inner(self)?;
        match *addr {
            Type::PtrToStack { offset } => {
                let offset_sum = offset.add(field.offset);
                self.stack.load(offset_sum, field.width)
            }
            Type::PtrToMemory { ref id, offset, buffer_size, .. } => {
                self.check_memory_access(offset, buffer_size, field.offset, field.width.bytes())?;
                Ok(Type::UNKNOWN_SCALAR)
            }
            Type::PtrToStruct { ref id, offset, ref descriptor, .. } => {
                let field_desc = descriptor
                    .find_field(offset, field)
                    .ok_or_else(|| "incorrect load".to_string())?;

                let (return_type, is_32_bit_ptr_load) = match &field_desc.field_type {
                    FieldType::Scalar { .. } | FieldType::MutableScalar { .. } => {
                        (Type::UNKNOWN_SCALAR, false)
                    }
                    FieldType::PtrToArray { id: array_id, is_32_bit } => (
                        Type::PtrToArray { id: array_id.prepended(id.clone()), offset: 0.into() },
                        *is_32_bit,
                    ),
                    FieldType::PtrToEndArray { id: array_id, is_32_bit } => {
                        (Type::PtrToEndArray { id: array_id.prepended(id.clone()) }, *is_32_bit)
                    }
                    FieldType::PtrToMemory { id: memory_id, buffer_size, is_32_bit } => (
                        Type::PtrToMemory {
                            id: memory_id.prepended(id.clone()),
                            offset: 0.into(),
                            buffer_size: *buffer_size as u64,
                        },
                        *is_32_bit,
                    ),
                    FieldType::NullablePtrToMemory { id: memory_id, buffer_size, is_32_bit } => {
                        let id = memory_id.prepended(id.clone());
                        (
                            Type::NullOr {
                                id: id.clone(),
                                inner: Box::new(Type::PtrToMemory {
                                    id,
                                    offset: 0.into(),
                                    buffer_size: *buffer_size as u64,
                                }),
                            },
                            *is_32_bit,
                        )
                    }
                };

                context.register_struct_access(StructAccess {
                    pc: self.pc,
                    memory_id: id.clone(),
                    field_offset: field_desc.offset,
                    is_32_bit_ptr_load,
                })?;

                Ok(return_type)
            }
            Type::PtrToArray { ref id, offset } => {
                self.check_memory_access(
                    offset,
                    *self.array_bounds.get(&id).unwrap_or(&0),
                    field.offset,
                    field.width.bytes(),
                )?;
                Ok(Type::UNKNOWN_SCALAR)
            }
            _ => Err("incorrect load".to_string()),
        }
    }

    /// Instantiates the return-value template of a helper: templates such as
    /// `MapValueParameter` or `NullOrParameter` are resolved against the
    /// current argument registers, and concrete types are returned unchanged.
    fn resolve_return_value(
        &self,
        verification_context: &mut VerificationContext<'_>,
        return_value: &Type,
        next: &mut ComputationContext,
        maybe_null: bool,
    ) -> Result<Type, String> {
        match return_value {
            Type::AliasParameter { parameter_index } => self.reg(parameter_index + 1),
            Type::ReleasableParameter { id, inner } => {
                let id = MemoryId::from(verification_context.next_id()).prepended(id.clone());
                if !maybe_null {
                    next.resources.insert(id.clone());
                }
                Ok(Type::Releasable {
                    id,
                    inner: Box::new(self.resolve_return_value(
                        verification_context,
                        inner,
                        next,
                        maybe_null,
                    )?),
                })
            }
            Type::NullOrParameter(t) => {
                let id = verification_context.next_id();
                Ok(Type::NullOr {
                    id: id.into(),
                    inner: Box::new(self.resolve_return_value(
                        verification_context,
                        t,
                        next,
                        true,
                    )?),
                })
            }
            Type::MapValueParameter { map_ptr_index } => {
                let schema = self.get_map_schema(*map_ptr_index)?;
                let id = verification_context.next_id();
                Ok(Type::PtrToMemory {
                    id: id.into(),
                    offset: 0.into(),
                    buffer_size: schema.value_size as u64,
                })
            }
            Type::MemoryParameter { size, .. } => {
                let buffer_size = size.size(self)?;
                let id = verification_context.next_id();
                Ok(Type::PtrToMemory { id: id.into(), offset: 0.into(), buffer_size })
            }
            t => Ok(t.clone()),
        }
    }

    fn compute_source(&self, src: Source) -> Result<Type, String> {
        match src {
            Source::Reg(reg) => self.reg(reg),
            Source::Value(v) => Ok(v.into()),
        }
    }

    fn apply_computation(
        context: &ComputationContext,
        op1: Type,
        op2: Type,
        alu_type: AluType,
        op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
    ) -> Result<Type, String> {
        let result: Type = match (alu_type, op1.inner(context)?, op2.inner(context)?) {
            (_, Type::ScalarValue(data1), Type::ScalarValue(data2)) => op(*data1, *data2).into(),
            (
                AluType::Add,
                Type::ScalarValue(_),
                Type::PtrToStack { .. } | Type::PtrToMemory { .. } | Type::PtrToStruct { .. },
            ) => {
                // Addition is commutative; normalize to pointer + scalar.
                return Self::apply_computation(context, op2, op1, alu_type, op);
            }
            (alu_type, Type::PtrToStack { offset: x }, Type::ScalarValue(data))
                if alu_type.is_ptr_compatible() =>
            {
                Type::PtrToStack { offset: run_on_stack_offset(*x, |x| op(x, *data)) }
            }
            (
                alu_type,
                Type::PtrToMemory { id, offset: x, buffer_size },
                Type::ScalarValue(data),
            ) if alu_type.is_ptr_compatible() => {
                let offset = op(*x, *data);
                Type::PtrToMemory { id: id.clone(), offset, buffer_size: *buffer_size }
            }
            (
                alu_type,
                Type::PtrToStruct { id, offset: x, descriptor },
                Type::ScalarValue(data),
            ) if alu_type.is_ptr_compatible() => {
                let offset = op(*x, *data);
                Type::PtrToStruct { id: id.clone(), offset, descriptor: descriptor.clone() }
            }
            (AluType::Add, Type::PtrToArray { id, offset: x }, Type::ScalarValue(data)) => {
                let offset =
                    x.checked_add(*data).ok_or_else(|| "array offset overflow".to_string())?;
                Type::PtrToArray { id: id.clone(), offset }
            }
            (AluType::Sub, Type::PtrToArray { id, offset: x }, Type::ScalarValue(data)) => {
                let offset =
                    x.checked_sub(*data).ok_or_else(|| "array offset underflow".to_string())?;
                Type::PtrToArray { id: id.clone(), offset }
            }
            (
                AluType::Sub,
                Type::PtrToMemory { id: id1, offset: x1, .. },
                Type::PtrToMemory { id: id2, offset: x2, .. },
            )
            | (
                AluType::Sub,
                Type::PtrToStruct { id: id1, offset: x1, .. },
                Type::PtrToStruct { id: id2, offset: x2, .. },
            )
            | (
                AluType::Sub,
                Type::PtrToArray { id: id1, offset: x1 },
                Type::PtrToArray { id: id2, offset: x2 },
            ) if id1 == id2 => Type::from(op(*x1, *x2)),
            (AluType::Sub, Type::PtrToStack { offset: x1 }, Type::PtrToStack { offset: x2 }) => {
                Type::from(op(x1.reg(), x2.reg()))
            }
            (
                AluType::Sub,
                Type::PtrToArray { id: id1, .. },
                Type::PtrToEndArray { id: id2, .. },
            )
            | (
                AluType::Sub,
                Type::PtrToEndArray { id: id1, .. },
                Type::PtrToArray { id: id2, .. },
            ) if id1 == id2 => Type::UNKNOWN_SCALAR,
            _ => Type::default(),
        };
        Ok(result)
    }

    fn alu(
        &mut self,
        op_name: Option<&str>,
        verification_context: &mut VerificationContext<'_>,
        dst: Register,
        src: Source,
        alu_type: AluType,
        op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
    ) -> Result<(), String> {
        if let Some(op_name) = op_name {
            bpf_log!(
                self,
                verification_context,
                "{op_name} {}, {}",
                display_register(dst),
                display_source(src)
            );
        }
        let op1 = self.reg(dst)?;
        let op2 = self.compute_source(src)?;
        let result = Self::apply_computation(self, op1, op2, alu_type, op)?;
        let mut next = self.next()?;
        next.set_reg(dst, result)?;
        verification_context.states.push(next);
        Ok(())
    }

    fn log_atomic_operation(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) {
        bpf_log!(
            self,
            verification_context,
            "lock {}{} [{}{}], {}",
            if fetch { "fetch " } else { "" },
            op_name,
            display_register(dst),
            print_offset(offset),
            display_register(src),
        );
    }

    fn raw_atomic_operation(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        width: DataWidth,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
        op: impl FnOnce(&ComputationContext, Type, Type) -> Result<Type, String>,
    ) -> Result<(), String> {
        self.log_atomic_operation(op_name, verification_context, fetch, dst, offset, src);
        let addr = self.reg(dst)?;
        let value = self.reg(src)?;
        let field = Field::new(offset, width);
        let loaded_type = self.load_memory(verification_context, &addr, field)?;
        let result = op(self, loaded_type.clone(), value)?;
        let mut next = self.next()?;
        next.store_memory(verification_context, &addr, field, result)?;
        if fetch {
            next.set_reg(src, loaded_type)?;
        }
        verification_context.states.push(next);
        Ok(())
    }

    fn atomic_operation(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        width: DataWidth,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
        alu_type: AluType,
        op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
    ) -> Result<(), String> {
        self.raw_atomic_operation(
            op_name,
            verification_context,
            width,
            fetch,
            dst,
            offset,
            src,
            |context: &ComputationContext, v1: Type, v2: Type| {
                Self::apply_computation(context, v1, v2, alu_type, op)
            },
        )
    }

    fn raw_atomic_cmpxchg(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        dst: Register,
        offset: i16,
        src: Register,
        jump_width: JumpWidth,
        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
    ) -> Result<(), String> {
        self.log_atomic_operation(op_name, verification_context, true, dst, offset, src);
        let width = match jump_width {
            JumpWidth::W32 => DataWidth::U32,
            JumpWidth::W64 => DataWidth::U64,
        };
        let addr = self.reg(dst)?;
        let field = Field::new(offset, width);
        let dst = self.load_memory(verification_context, &addr, field)?;
        let value = self.reg(src)?;
        let r0 = self.reg(0)?;
        let branch = self.compute_branch(jump_width, &dst, &r0, op)?;
        // The comparison may succeed.
        if branch.unwrap_or(true) {
            let mut next = self.next()?;
            let (dst, r0) =
                Type::constraint(&mut next, JumpType::Eq, jump_width, dst.clone(), r0.clone())?;
            next.set_reg(0, dst)?;
            next.store_memory(verification_context, &addr, field, value)?;
            verification_context.states.push(next);
        }
        // The comparison may fail.
        if !branch.unwrap_or(false) {
            let mut next = self.next()?;
            let (dst, r0) = Type::constraint(&mut next, JumpType::Ne, jump_width, dst, r0)?;
            next.set_reg(0, dst.clone())?;
            next.store_memory(verification_context, &addr, field, dst)?;
            verification_context.states.push(next);
        }

        Ok(())
    }

    fn endianness<BO: ByteOrder>(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        bpf_log!(
            self,
            verification_context,
            "{op_name}{} {}",
            width.bits(),
            display_register(dst),
        );
        let bit_op = |value: u64| match width {
            DataWidth::U16 => BO::read_u16((value as u16).as_bytes()) as u64,
            DataWidth::U32 => BO::read_u32((value as u32).as_bytes()) as u64,
            DataWidth::U64 => BO::read_u64(value.as_bytes()),
            _ => {
                panic!("Unexpected bit width for endianness operation");
            }
        };
        let value = self.reg(dst)?;
        let new_value = match value {
            Type::ScalarValue(data) => Type::ScalarValue(ScalarValueData::new(
                bit_op(data.value),
                bit_op(data.unknown_mask),
                bit_op(data.unwritten_mask),
                U64Range::max(),
            )),
            _ => Type::default(),
        };
        let mut next = self.next()?;
        next.set_reg(dst, new_value)?;
        verification_context.states.push(next);
        Ok(())
    }

    /// Evaluates the condition of a conditional jump. Returns `Ok(Some(b))`
    /// when the outcome is statically known, `Ok(None)` when both branches
    /// are possible, and `Err` when the two operands cannot be compared.
    fn compute_branch(
        &self,
        jump_width: JumpWidth,
        op1: &Type,
        op2: &Type,
        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
    ) -> Result<Option<bool>, String> {
        match (jump_width, op1, op2) {
            (_, Type::ScalarValue(data1), Type::ScalarValue(data2)) => op(*data1, *data2),
            (JumpWidth::W64, Type::ScalarValue(data), Type::NullOr { .. })
            | (JumpWidth::W64, Type::NullOr { .. }, Type::ScalarValue(data))
                if data.is_zero() =>
            {
                Ok(None)
            }

            (JumpWidth::W64, Type::ScalarValue(data), t) if data.is_zero() && t.is_non_zero() => {
                op(1.into(), 0.into())
            }

            (JumpWidth::W64, t, Type::ScalarValue(data)) if data.is_zero() && t.is_non_zero() => {
                op(0.into(), 1.into())
            }

            (JumpWidth::W64, Type::PtrToStack { offset: x }, Type::PtrToStack { offset: y }) => {
                op(x.reg(), y.reg())
            }

            (
                JumpWidth::W64,
                Type::PtrToMemory { id: id1, offset: x, .. },
                Type::PtrToMemory { id: id2, offset: y, .. },
            )
            | (
                JumpWidth::W64,
                Type::PtrToStruct { id: id1, offset: x, .. },
                Type::PtrToStruct { id: id2, offset: y, .. },
            )
            | (
                JumpWidth::W64,
                Type::PtrToArray { id: id1, offset: x, .. },
                Type::PtrToArray { id: id2, offset: y, .. },
            ) if *id1 == *id2 => op(*x, *y),

            (JumpWidth::W64, Type::PtrToArray { id: id1, .. }, Type::PtrToEndArray { id: id2 })
            | (JumpWidth::W64, Type::PtrToEndArray { id: id1 }, Type::PtrToArray { id: id2, .. })
                if *id1 == *id2 =>
            {
                Ok(None)
            }

            _ => Err(()),
        }
        .map_err(|_| "non permitted comparison".to_string())
    }

    fn conditional_jump(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        dst: Register,
        src: Source,
        offset: i16,
        jump_type: JumpType,
        jump_width: JumpWidth,
        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
    ) -> Result<(), String> {
        bpf_log!(
            self,
            verification_context,
            "{op_name} {}, {}, {}",
            display_register(dst),
            display_source(src),
            if offset == 0 { format!("0") } else { print_offset(offset) },
        );
        let op1 = self.reg(dst)?;
        let op2 = self.compute_source(src.clone())?;
        let apply_constraints_and_register =
            |mut next: Self, jump_type: JumpType| -> Result<Self, String> {
                if jump_type != JumpType::Unknown {
                    let (new_op1, new_op2) = Type::constraint(
                        &mut next,
                        jump_type,
                        jump_width,
                        op1.clone(),
                        op2.clone(),
                    )?;
                    if dst < REGISTER_COUNT {
                        next.set_reg(dst, new_op1)?;
                    }
                    match src {
                        Source::Reg(r) => {
                            next.set_reg(r, new_op2)?;
                        }
                        _ => {}
                    }
                }
                Ok(next)
            };
        let branch = self.compute_branch(jump_width, &op1, &op2, op)?;
        let parent = Some(Arc::new(self.clone()));
        if branch.unwrap_or(true) {
            verification_context.states.push(apply_constraints_and_register(
                self.jump_with_offset(offset, parent.clone())?,
                jump_type,
            )?);
        }
        if !branch.unwrap_or(false) {
            verification_context.states.push(apply_constraints_and_register(
                self.jump_with_offset(0, parent)?,
                jump_type.invert(),
            )?);
        }
        Ok(())
    }

    /// Called when a context terminates, either because the program exits or
    /// because it reaches a state that is already known to terminate. Data
    /// dependencies are propagated up the parent chain, and each fully
    /// resolved context is recorded in `terminating_contexts`, keeping only
    /// the most general states for each pc.
    fn terminate(self, verification_context: &mut VerificationContext<'_>) -> Result<(), String> {
        let mut next = Some(self);
        while let Some(mut current) = next.take() {
            let parent = current.parent.take();

            // Merge the dependencies reported by all the branches forked from
            // this context.
            let mut dependencies = DataDependencies::default();
            for dependency in current.dependencies.get_mut().iter() {
                dependencies.merge(dependency);
            }

            dependencies.visit(
                &mut DataDependenciesVisitorContext {
                    calling_context: &verification_context.calling_context,
                    computation_context: &current,
                },
                verification_context.code[current.pc],
            )?;

            // Generalize the context by erasing everything the rest of the
            // program does not depend on.
            for register in 0..GENERAL_REGISTER_COUNT {
                if !dependencies.registers.contains(&register) {
                    current.set_reg(register, Default::default())?;
                }
            }
            current.stack.data.retain(|k, _| dependencies.stack.contains(k));

            let terminating_contexts =
                verification_context.terminating_contexts.entry(current.pc).or_default();
            let mut is_dominated = false;
            terminating_contexts.retain(|c| match c.computation_context.partial_cmp(&current) {
                Some(Ordering::Less) => false,
                Some(Ordering::Equal) | Some(Ordering::Greater) => {
                    is_dominated = true;
                    true
                }
                _ => true,
            });
            if !is_dominated {
                terminating_contexts.push(TerminatingContext {
                    computation_context: current,
                    dependencies: dependencies.clone(),
                });
            }

            if let Some(parent) = parent {
                parent.dependencies.lock().push(dependencies);
                next = Arc::into_inner(parent);
            }
        }
        Ok(())
    }
}

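/// Drops the chain of parents iteratively. A deep parent chain would
/// otherwise be dropped recursively and could overflow the thread's stack.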
impl Drop for ComputationContext {
    fn drop(&mut self) {
        let mut next = self.parent.take().and_then(Arc::into_inner);
        while let Some(mut current) = next {
            next = current.parent.take().and_then(Arc::into_inner);
        }
    }
}

/// Partial order on contexts matching the one on `Type`: `c1 >= c2` means
/// `c1` is more general than `c2`, so proving that `c1` terminates also
/// proves that `c2` does. Contexts at different pcs, or with different live
/// resources, are not comparable.
impl PartialOrd for ComputationContext {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        if self.pc != other.pc || self.resources != other.resources {
            return None;
        }
        let mut result = self.stack.partial_cmp(&other.stack)?;
        result = associate_orderings(
            result,
            Type::compare_list(self.registers.iter(), other.registers.iter())?,
        )?;
        let mut array_bound_iter1 = self.array_bounds.iter().peekable();
        let mut array_bound_iter2 = other.array_bounds.iter().peekable();
        let result = loop {
            match (array_bound_iter1.peek().cloned(), array_bound_iter2.peek().cloned()) {
                (None, None) => break result,
                (None, _) => break associate_orderings(result, Ordering::Greater)?,
                (_, None) => break associate_orderings(result, Ordering::Less)?,
                (Some((k1, v1)), Some((k2, v2))) => match k1.cmp(k2) {
                    Ordering::Equal => {
                        array_bound_iter1.next();
                        array_bound_iter2.next();
                        // A smaller proven bound is more general.
                        result = associate_orderings(result, v2.cmp(v1))?;
                    }
                    v @ Ordering::Less => {
                        array_bound_iter1.next();
                        result = associate_orderings(result, v)?;
                    }
                    v @ Ordering::Greater => {
                        array_bound_iter2.next();
                        result = associate_orderings(result, v)?;
                    }
                },
            }
        };
        Some(result)
    }
}

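/// The registers and stack slots that the remainder of the program reads
/// before overwriting them; used by `terminate` to generalize contexts.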
#[derive(Clone, Debug, Default)]
struct DataDependencies {
    registers: HashSet<Register>,
    stack: HashSet<usize>,
}

impl DataDependencies {
    fn merge(&mut self, other: &DataDependencies) {
        self.registers.extend(other.registers.iter());
        self.stack.extend(other.stack.iter());
    }

    fn alu(&mut self, dst: Register, src: Source) -> Result<(), String> {
        // An ALU op reads `src` only if its result in `dst` is itself read.
        if self.registers.contains(&dst) {
            if let Source::Reg(src) = src {
                self.registers.insert(src);
            }
        }
        Ok(())
    }

    fn jmp(&mut self, dst: Register, src: Source) -> Result<(), String> {
        self.registers.insert(dst);
        if let Source::Reg(src) = src {
            self.registers.insert(src);
        }
        Ok(())
    }

    fn atomic(
        &mut self,
        context: &ComputationContext,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
        width: DataWidth,
        is_cmpxchg: bool,
    ) -> Result<(), String> {
        let mut is_read = false;
        if is_cmpxchg && self.registers.contains(&0) {
            is_read = true;
        }
        if fetch && self.registers.contains(&src) {
            is_read = true;
        }
        let addr = context.reg(dst)?;
        if let Type::PtrToStack { offset: stack_offset } = addr {
            let stack_offset = stack_offset.add(offset);
            if !stack_offset.is_valid() {
                return Err(format!("Invalid stack offset at {}", context.pc));
            }
            if is_read || self.stack.contains(&stack_offset.array_index()) {
                is_read = true;
                self.stack.insert(stack_offset.array_index());
            }
        }
        if is_read {
            self.registers.insert(0);
            self.registers.insert(src);
        }
        self.registers.insert(dst);
        Ok(())
    }
}

struct DataDependenciesVisitorContext<'a> {
    calling_context: &'a CallingContext,
    computation_context: &'a ComputationContext,
}

impl BpfVisitor for DataDependencies {
    type Context<'a> = DataDependenciesVisitorContext<'a>;

    fn add<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn add64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn and<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn and64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn arsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn arsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn div<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn div64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn lsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn lsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn r#mod<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn mod64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn mul<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn mul64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn or<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn or64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn rsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn rsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn sub<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn sub64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn xor<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn xor64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }

    fn mov<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        if src == Source::Reg(dst) || !self.registers.contains(&dst) {
            return Ok(());
        }
        if let Source::Reg(src) = src {
            self.registers.insert(src);
        }
        self.registers.remove(&dst);
        Ok(())
    }
    fn mov64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
2338 self.mov(context, dst, src)
2339 }
2340
2341 fn neg<'a>(&mut self, _context: &mut Self::Context<'a>, _dst: Register) -> Result<(), String> {
2342 Ok(())
2344 }
2345 fn neg64<'a>(
2346 &mut self,
2347 _context: &mut Self::Context<'a>,
2348 _dst: Register,
2349 ) -> Result<(), String> {
2350 Ok(())
2352 }
2353
2354 fn be<'a>(
2355 &mut self,
2356 _context: &mut Self::Context<'a>,
2357 _dst: Register,
2358 _width: DataWidth,
2359 ) -> Result<(), String> {
2360 Ok(())
2362 }
2363 fn le<'a>(
2364 &mut self,
2365 _context: &mut Self::Context<'a>,
2366 _dst: Register,
2367 _width: DataWidth,
2368 ) -> Result<(), String> {
2369 Ok(())
2371 }
2372
2373 fn call_external<'a>(
2374 &mut self,
2375 context: &mut Self::Context<'a>,
2376 index: u32,
2377 ) -> Result<(), String> {
2378 let Some(signature) = context.calling_context.helpers.get(&index).cloned() else {
2379 return Err(format!("unknown external function {}", index));
2380 };
2381 for register in 0..signature.args.len() + 1 {
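2382 // The call overwrites r0 and the argument registers.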
2383 self.registers.remove(&(register as Register));
2384 }
2385 for register in 0..signature.args.len() {
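2386 // The call reads its arguments, which become dependencies.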
2387 self.registers.insert((register + 1) as Register);
2388 }
2389 Ok(())
2390 }
2391
2392 fn exit<'a>(&mut self, _context: &mut Self::Context<'a>) -> Result<(), String> {
2393 self.registers.insert(0);
2395 Ok(())
2396 }
2397
2398 fn jump<'a>(&mut self, _context: &mut Self::Context<'a>, _offset: i16) -> Result<(), String> {
2399 Ok(())
2401 }
2402
2403 fn jeq<'a>(
2404 &mut self,
2405 _context: &mut Self::Context<'a>,
2406 dst: Register,
2407 src: Source,
2408 offset: i16,
2409 ) -> Result<(), String> {
2410 self.jmp(dst, src)
2411 }
2412 fn jeq64<'a>(
2413 &mut self,
2414 _context: &mut Self::Context<'a>,
2415 dst: Register,
2416 src: Source,
2417 offset: i16,
2418 ) -> Result<(), String> {
2419 self.jmp(dst, src)
2420 }
2421 fn jne<'a>(
2422 &mut self,
2423 _context: &mut Self::Context<'a>,
2424 dst: Register,
2425 src: Source,
2426 offset: i16,
2427 ) -> Result<(), String> {
2428 self.jmp(dst, src)
2429 }
2430 fn jne64<'a>(
2431 &mut self,
2432 _context: &mut Self::Context<'a>,
2433 dst: Register,
2434 src: Source,
2435 offset: i16,
2436 ) -> Result<(), String> {
2437 self.jmp(dst, src)
2438 }
2439 fn jge<'a>(
2440 &mut self,
2441 _context: &mut Self::Context<'a>,
2442 dst: Register,
2443 src: Source,
2444 offset: i16,
2445 ) -> Result<(), String> {
2446 self.jmp(dst, src)
2447 }
2448 fn jge64<'a>(
2449 &mut self,
2450 _context: &mut Self::Context<'a>,
2451 dst: Register,
2452 src: Source,
2453 offset: i16,
2454 ) -> Result<(), String> {
2455 self.jmp(dst, src)
2456 }
2457 fn jgt<'a>(
2458 &mut self,
2459 _context: &mut Self::Context<'a>,
2460 dst: Register,
2461 src: Source,
2462 offset: i16,
2463 ) -> Result<(), String> {
2464 self.jmp(dst, src)
2465 }
2466 fn jgt64<'a>(
2467 &mut self,
2468 _context: &mut Self::Context<'a>,
2469 dst: Register,
2470 src: Source,
2471 offset: i16,
2472 ) -> Result<(), String> {
2473 self.jmp(dst, src)
2474 }
2475 fn jle<'a>(
2476 &mut self,
2477 _context: &mut Self::Context<'a>,
2478 dst: Register,
2479 src: Source,
2480 offset: i16,
2481 ) -> Result<(), String> {
2482 self.jmp(dst, src)
2483 }
2484 fn jle64<'a>(
2485 &mut self,
2486 _context: &mut Self::Context<'a>,
2487 dst: Register,
2488 src: Source,
2489 offset: i16,
2490 ) -> Result<(), String> {
2491 self.jmp(dst, src)
2492 }
2493 fn jlt<'a>(
2494 &mut self,
2495 _context: &mut Self::Context<'a>,
2496 dst: Register,
2497 src: Source,
2498 offset: i16,
2499 ) -> Result<(), String> {
2500 self.jmp(dst, src)
2501 }
2502 fn jlt64<'a>(
2503 &mut self,
2504 _context: &mut Self::Context<'a>,
2505 dst: Register,
2506 src: Source,
2507 offset: i16,
2508 ) -> Result<(), String> {
2509 self.jmp(dst, src)
2510 }
2511 fn jsge<'a>(
2512 &mut self,
2513 _context: &mut Self::Context<'a>,
2514 dst: Register,
2515 src: Source,
2516 offset: i16,
2517 ) -> Result<(), String> {
2518 self.jmp(dst, src)
2519 }
2520 fn jsge64<'a>(
2521 &mut self,
2522 _context: &mut Self::Context<'a>,
2523 dst: Register,
2524 src: Source,
2525 offset: i16,
2526 ) -> Result<(), String> {
2527 self.jmp(dst, src)
2528 }
2529 fn jsgt<'a>(
2530 &mut self,
2531 _context: &mut Self::Context<'a>,
2532 dst: Register,
2533 src: Source,
2534 offset: i16,
2535 ) -> Result<(), String> {
2536 self.jmp(dst, src)
2537 }
2538 fn jsgt64<'a>(
2539 &mut self,
2540 _context: &mut Self::Context<'a>,
2541 dst: Register,
2542 src: Source,
2543 offset: i16,
2544 ) -> Result<(), String> {
2545 self.jmp(dst, src)
2546 }
2547 fn jsle<'a>(
2548 &mut self,
2549 _context: &mut Self::Context<'a>,
2550 dst: Register,
2551 src: Source,
2552 offset: i16,
2553 ) -> Result<(), String> {
2554 self.jmp(dst, src)
2555 }
2556 fn jsle64<'a>(
2557 &mut self,
2558 _context: &mut Self::Context<'a>,
2559 dst: Register,
2560 src: Source,
2561 offset: i16,
2562 ) -> Result<(), String> {
2563 self.jmp(dst, src)
2564 }
2565 fn jslt<'a>(
2566 &mut self,
2567 _context: &mut Self::Context<'a>,
2568 dst: Register,
2569 src: Source,
2570 offset: i16,
2571 ) -> Result<(), String> {
2572 self.jmp(dst, src)
2573 }
2574 fn jslt64<'a>(
2575 &mut self,
2576 _context: &mut Self::Context<'a>,
2577 dst: Register,
2578 src: Source,
2579 offset: i16,
2580 ) -> Result<(), String> {
2581 self.jmp(dst, src)
2582 }
2583 fn jset<'a>(
2584 &mut self,
2585 _context: &mut Self::Context<'a>,
2586 dst: Register,
2587 src: Source,
2588 offset: i16,
2589 ) -> Result<(), String> {
2590 self.jmp(dst, src)
2591 }
2592 fn jset64<'a>(
2593 &mut self,
2594 _context: &mut Self::Context<'a>,
2595 dst: Register,
2596 src: Source,
2597 offset: i16,
2598 ) -> Result<(), String> {
2599 self.jmp(dst, src)
2600 }
2601
2602 fn atomic_add<'a>(
2603 &mut self,
2604 context: &mut Self::Context<'a>,
2605 fetch: bool,
2606 dst: Register,
2607 offset: i16,
2608 src: Register,
2609 ) -> Result<(), String> {
2610 self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
2611 }
2612
2613 fn atomic_add64<'a>(
2614 &mut self,
2615 context: &mut Self::Context<'a>,
2616 fetch: bool,
2617 dst: Register,
2618 offset: i16,
2619 src: Register,
2620 ) -> Result<(), String> {
2621 self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
2622 }
2623
2624 fn atomic_and<'a>(
2625 &mut self,
2626 context: &mut Self::Context<'a>,
2627 fetch: bool,
2628 dst: Register,
2629 offset: i16,
2630 src: Register,
2631 ) -> Result<(), String> {
2632 self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
2633 }
2634
2635 fn atomic_and64<'a>(
2636 &mut self,
2637 context: &mut Self::Context<'a>,
2638 fetch: bool,
2639 dst: Register,
2640 offset: i16,
2641 src: Register,
2642 ) -> Result<(), String> {
2643 self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
2644 }
2645
2646 fn atomic_or<'a>(
2647 &mut self,
2648 context: &mut Self::Context<'a>,
2649 fetch: bool,
2650 dst: Register,
2651 offset: i16,
2652 src: Register,
2653 ) -> Result<(), String> {
2654 self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
2655 }
2656
2657 fn atomic_or64<'a>(
2658 &mut self,
2659 context: &mut Self::Context<'a>,
2660 fetch: bool,
2661 dst: Register,
2662 offset: i16,
2663 src: Register,
2664 ) -> Result<(), String> {
2665 self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
2666 }
2667
2668 fn atomic_xor<'a>(
2669 &mut self,
2670 context: &mut Self::Context<'a>,
2671 fetch: bool,
2672 dst: Register,
2673 offset: i16,
2674 src: Register,
2675 ) -> Result<(), String> {
2676 self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
2677 }
2678
2679 fn atomic_xor64<'a>(
2680 &mut self,
2681 context: &mut Self::Context<'a>,
2682 fetch: bool,
2683 dst: Register,
2684 offset: i16,
2685 src: Register,
2686 ) -> Result<(), String> {
2687 self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
2688 }
2689
2690 fn atomic_xchg<'a>(
2691 &mut self,
2692 context: &mut Self::Context<'a>,
2693 fetch: bool,
2694 dst: Register,
2695 offset: i16,
2696 src: Register,
2697 ) -> Result<(), String> {
2698 self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
2699 }
2700
2701 fn atomic_xchg64<'a>(
2702 &mut self,
2703 context: &mut Self::Context<'a>,
2704 fetch: bool,
2705 dst: Register,
2706 offset: i16,
2707 src: Register,
2708 ) -> Result<(), String> {
2709 self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
2710 }
2711
2712 fn atomic_cmpxchg<'a>(
2713 &mut self,
2714 context: &mut Self::Context<'a>,
2715 dst: Register,
2716 offset: i16,
2717 src: Register,
2718 ) -> Result<(), String> {
2719 self.atomic(&context.computation_context, true, dst, offset, src, DataWidth::U32, true)
2720 }
2721
2722 fn atomic_cmpxchg64<'a>(
2723 &mut self,
2724 context: &mut Self::Context<'a>,
2725 dst: Register,
2726 offset: i16,
2727 src: Register,
2728 ) -> Result<(), String> {
2729 self.atomic(&context.computation_context, true, dst, offset, src, DataWidth::U64, true)
2730 }
2731
2732 fn load<'a>(
2733 &mut self,
2734 context: &mut Self::Context<'a>,
2735 dst: Register,
2736 offset: i16,
2737 src: Register,
2738 width: DataWidth,
2739 ) -> Result<(), String> {
2740 let context = &context.computation_context;
2741 if self.registers.contains(&dst) {
2742 let addr = context.reg(src)?;
2743 if let Type::PtrToStack { offset: stack_offset } = addr {
2744 let stack_offset = stack_offset.add(offset);
2745 if !stack_offset.is_valid() {
2746 return Err(format!("Invalid stack offset at {}", context.pc));
2747 }
2748 self.stack.insert(stack_offset.array_index());
2749 }
2750 }
2751 self.registers.insert(src);
2752 Ok(())
2753 }
2754
2755 fn load64<'a>(
2756 &mut self,
2757 _context: &mut Self::Context<'a>,
2758 dst: Register,
2759 _src: u8,
2760 _lower: u32,
2761 ) -> Result<(), String> {
2762 self.registers.remove(&dst);
2763 Ok(())
2764 }
2765
2766 fn load_from_packet<'a>(
2767 &mut self,
2768 _context: &mut Self::Context<'a>,
2769 dst: Register,
2770 src: Register,
2771 _offset: i32,
2772 register_offset: Option<Register>,
2773 _width: DataWidth,
2774 ) -> Result<(), String> {
2775 for register in 1..6 {
2777 self.registers.remove(&(register as Register));
2778 }
2779 if self.registers.remove(&dst) {
2781 self.registers.insert(src);
2782 if let Some(reg) = register_offset {
2783 self.registers.insert(reg);
2784 }
2785 }
2786 Ok(())
2787 }
2788
2789 fn store<'a>(
2790 &mut self,
2791 context: &mut Self::Context<'a>,
2792 dst: Register,
2793 offset: i16,
2794 src: Source,
2795 width: DataWidth,
2796 ) -> Result<(), String> {
2797 let context = &context.computation_context;
2798 let addr = context.reg(dst)?;
2799 if let Type::PtrToStack { offset: stack_offset } = addr {
2800 let stack_offset = stack_offset.add(offset);
2801 if !stack_offset.is_valid() {
2802 return Err(format!("Invalid stack offset at {}", context.pc));
2803 }
2804 if self.stack.remove(&stack_offset.array_index()) {
2805 if let Source::Reg(src) = src {
2806 self.registers.insert(src);
2807 }
2808 }
2809 } else if let Source::Reg(src) = src {
2810 self.registers.insert(src);
2811 }
2814
2815 Ok(())
2816 }
2817}
2818
2819#[derive(Debug)]
2820struct TerminatingContext {
2821 computation_context: ComputationContext,
2822 dependencies: DataDependencies,
2823}
2824
2825#[derive(Clone, Copy, Debug, Eq, PartialEq)]
2826enum AluType {
2827 Plain,
2828 Sub,
2829 Add,
2830}
2831
2832impl AluType {
2833 fn is_ptr_compatible(&self) -> bool {
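2834 // Pointer operands are only allowed in additions and subtractions.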
2835 match self {
2836 Self::Sub | Self::Add => true,
2837 _ => false,
2838 }
2839 }
2840}
2841
2842#[derive(Clone, Copy, Debug, Eq, PartialEq)]
2843enum JumpWidth {
2844 W32,
2845 W64,
2846}
2847
2848#[derive(Clone, Copy, Debug, Eq, PartialEq)]
2849enum JumpType {
2850 Eq,
2851 Ge,
2852 Gt,
2853 Le,
2854 LooseComparaison,
2855 Lt,
2856 Ne,
2857 StrictComparaison,
2858 Unknown,
2859}
2860
2861impl JumpType {
2862 fn invert(&self) -> Self {
2863 match self {
2864 Self::Eq => Self::Ne,
2865 Self::Ge => Self::Lt,
2866 Self::Gt => Self::Le,
2867 Self::Le => Self::Gt,
2868 Self::LooseComparaison => Self::StrictComparaison,
2869 Self::Lt => Self::Ge,
2870 Self::Ne => Self::Eq,
2871 Self::StrictComparaison => Self::LooseComparaison,
2872 Self::Unknown => Self::Unknown,
2873 }
2874 }
2875
2876 fn is_strict(&self) -> bool {
2877 match self {
2878 Self::Gt | Self::Lt | Self::Ne | Self::StrictComparaison => true,
2879 _ => false,
2880 }
2881 }
2882}
2883
2884fn display_register(register: Register) -> String {
2885 format!("%r{register}")
2886}
2887
2888fn display_source(src: Source) -> String {
2889 match src {
2890 Source::Reg(r) => display_register(r),
2891 Source::Value(v) => format!("0x{v:x}"),
2892 }
2893}
2894
2895impl BpfVisitor for ComputationContext {
2896 type Context<'a> = VerificationContext<'a>;
2897
2898 fn add<'a>(
2899 &mut self,
2900 context: &mut Self::Context<'a>,
2901 dst: Register,
2902 src: Source,
2903 ) -> Result<(), String> {
2904 self.alu(Some("add32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x + y))
2905 }
2906 fn add64<'a>(
2907 &mut self,
2908 context: &mut Self::Context<'a>,
2909 dst: Register,
2910 src: Source,
2911 ) -> Result<(), String> {
2912 self.alu(Some("add"), context, dst, src, AluType::Add, |x, y| x + y)
2913 }
2914 fn and<'a>(
2915 &mut self,
2916 context: &mut Self::Context<'a>,
2917 dst: Register,
2918 src: Source,
2919 ) -> Result<(), String> {
2920 self.alu(Some("and32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x & y))
2921 }
2922 fn and64<'a>(
2923 &mut self,
2924 context: &mut Self::Context<'a>,
2925 dst: Register,
2926 src: Source,
2927 ) -> Result<(), String> {
2928 self.alu(Some("and"), context, dst, src, AluType::Plain, |x, y| x & y)
2929 }
2930 fn arsh<'a>(
2931 &mut self,
2932 context: &mut Self::Context<'a>,
2933 dst: Register,
2934 src: Source,
2935 ) -> Result<(), String> {
2936 self.alu(Some("arsh32"), context, dst, src, AluType::Plain, |x, y| {
2937 alu32(x, y, |x, y| x.ashr(y))
2938 })
2939 }
2940 fn arsh64<'a>(
2941 &mut self,
2942 context: &mut Self::Context<'a>,
2943 dst: Register,
2944 src: Source,
2945 ) -> Result<(), String> {
2946 self.alu(Some("arsh"), context, dst, src, AluType::Plain, |x, y| x.ashr(y))
2947 }
2948 fn div<'a>(
2949 &mut self,
2950 context: &mut Self::Context<'a>,
2951 dst: Register,
2952 src: Source,
2953 ) -> Result<(), String> {
2954 self.alu(Some("div32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x / y))
2955 }
2956 fn div64<'a>(
2957 &mut self,
2958 context: &mut Self::Context<'a>,
2959 dst: Register,
2960 src: Source,
2961 ) -> Result<(), String> {
2962 self.alu(Some("div"), context, dst, src, AluType::Plain, |x, y| x / y)
2963 }
2964 fn lsh<'a>(
2965 &mut self,
2966 context: &mut Self::Context<'a>,
2967 dst: Register,
2968 src: Source,
2969 ) -> Result<(), String> {
2970 self.alu(Some("lsh32"), context, dst, src, AluType::Plain, |x, y| {
2971 alu32(x, y, |x, y| x << y)
2972 })
2973 }
2974 fn lsh64<'a>(
2975 &mut self,
2976 context: &mut Self::Context<'a>,
2977 dst: Register,
2978 src: Source,
2979 ) -> Result<(), String> {
2980 self.alu(Some("lsh"), context, dst, src, AluType::Plain, |x, y| x << y)
2981 }
2982 fn r#mod<'a>(
2983 &mut self,
2984 context: &mut Self::Context<'a>,
2985 dst: Register,
2986 src: Source,
2987 ) -> Result<(), String> {
2988 self.alu(Some("mod32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x % y))
2989 }
2990 fn mod64<'a>(
2991 &mut self,
2992 context: &mut Self::Context<'a>,
2993 dst: Register,
2994 src: Source,
2995 ) -> Result<(), String> {
2996 self.alu(Some("mod"), context, dst, src, AluType::Plain, |x, y| x % y)
2997 }
2998 fn mov<'a>(
2999 &mut self,
3000 context: &mut Self::Context<'a>,
3001 dst: Register,
3002 src: Source,
3003 ) -> Result<(), String> {
3004 bpf_log!(self, context, "mov32 {}, {}", display_register(dst), display_source(src));
3005 let src = self.compute_source(src)?;
3006 let value = match src {
3007 Type::ScalarValue(data) => {
3008 let value = (data.value as u32) as u64;
3009 let unknown_mask = (data.unknown_mask as u32) as u64;
3010 let unwritten_mask = (data.unwritten_mask as u32) as u64;
3011 let urange = U64Range::compute_range_for_bytes_swap(0.into(), data.urange, 0, 0, 4);
3012 Type::ScalarValue(ScalarValueData::new(value, unknown_mask, unwritten_mask, urange))
3013 }
3014 _ => Type::default(),
3015 };
3016 let mut next = self.next()?;
3017 next.set_reg(dst, value)?;
3018 context.states.push(next);
3019 Ok(())
3020 }
3021 fn mov64<'a>(
3022 &mut self,
3023 context: &mut Self::Context<'a>,
3024 dst: Register,
3025 src: Source,
3026 ) -> Result<(), String> {
3027 bpf_log!(self, context, "mov {}, {}", display_register(dst), display_source(src));
3028 let src = self.compute_source(src)?;
3029 let mut next = self.next()?;
3030 next.set_reg(dst, src)?;
3031 context.states.push(next);
3032 Ok(())
3033 }
3034 fn mul<'a>(
3035 &mut self,
3036 context: &mut Self::Context<'a>,
3037 dst: Register,
3038 src: Source,
3039 ) -> Result<(), String> {
3040 self.alu(Some("mul32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x * y))
3041 }
3042 fn mul64<'a>(
3043 &mut self,
3044 context: &mut Self::Context<'a>,
3045 dst: Register,
3046 src: Source,
3047 ) -> Result<(), String> {
3048 self.alu(Some("mul"), context, dst, src, AluType::Plain, |x, y| x * y)
3049 }
3050 fn or<'a>(
3051 &mut self,
3052 context: &mut Self::Context<'a>,
3053 dst: Register,
3054 src: Source,
3055 ) -> Result<(), String> {
3056 self.alu(Some("or32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x | y))
3057 }
3058 fn or64<'a>(
3059 &mut self,
3060 context: &mut Self::Context<'a>,
3061 dst: Register,
3062 src: Source,
3063 ) -> Result<(), String> {
3064 self.alu(Some("or"), context, dst, src, AluType::Plain, |x, y| x | y)
3065 }
3066 fn rsh<'a>(
3067 &mut self,
3068 context: &mut Self::Context<'a>,
3069 dst: Register,
3070 src: Source,
3071 ) -> Result<(), String> {
3072 self.alu(Some("rsh32"), context, dst, src, AluType::Plain, |x, y| {
3073 alu32(x, y, |x, y| x >> y)
3074 })
3075 }
3076 fn rsh64<'a>(
3077 &mut self,
3078 context: &mut Self::Context<'a>,
3079 dst: Register,
3080 src: Source,
3081 ) -> Result<(), String> {
3082 self.alu(Some("rsh"), context, dst, src, AluType::Plain, |x, y| x >> y)
3083 }
3084 fn sub<'a>(
3085 &mut self,
3086 context: &mut Self::Context<'a>,
3087 dst: Register,
3088 src: Source,
3089 ) -> Result<(), String> {
3090 self.alu(Some("sub32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x - y))
3091 }
3092 fn sub64<'a>(
3093 &mut self,
3094 context: &mut Self::Context<'a>,
3095 dst: Register,
3096 src: Source,
3097 ) -> Result<(), String> {
3098 self.alu(Some("sub"), context, dst, src, AluType::Sub, |x, y| x - y)
3099 }
3100 fn xor<'a>(
3101 &mut self,
3102 context: &mut Self::Context<'a>,
3103 dst: Register,
3104 src: Source,
3105 ) -> Result<(), String> {
3106 self.alu(Some("xor32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x ^ y))
3107 }
3108 fn xor64<'a>(
3109 &mut self,
3110 context: &mut Self::Context<'a>,
3111 dst: Register,
3112 src: Source,
3113 ) -> Result<(), String> {
3114 self.alu(Some("xor"), context, dst, src, AluType::Plain, |x, y| x ^ y)
3115 }
3116
3117 fn neg<'a>(&mut self, context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
3118 bpf_log!(self, context, "neg32 {}", display_register(dst));
3119 self.alu(None, context, dst, Source::Value(0), AluType::Plain, |x, y| {
3120 alu32(x, y, |x, _y| -x)
3121 })
3122 }
3123 fn neg64<'a>(&mut self, context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
3124 bpf_log!(self, context, "neg {}", display_register(dst));
3125 self.alu(None, context, dst, Source::Value(0), AluType::Plain, |x, _y| -x)
3126 }
3127
3128 fn be<'a>(
3129 &mut self,
3130 context: &mut Self::Context<'a>,
3131 dst: Register,
3132 width: DataWidth,
3133 ) -> Result<(), String> {
3134 self.endianness::<BigEndian>("be", context, dst, width)
3135 }
3136
3137 fn le<'a>(
3138 &mut self,
3139 context: &mut Self::Context<'a>,
3140 dst: Register,
3141 width: DataWidth,
3142 ) -> Result<(), String> {
3143 self.endianness::<LittleEndian>("le", context, dst, width)
3144 }
3145
3146 fn call_external<'a>(
3147 &mut self,
3148 context: &mut Self::Context<'a>,
3149 index: u32,
3150 ) -> Result<(), String> {
3151 bpf_log!(self, context, "call 0x{:x}", index);
3152 let Some(signature) = context.calling_context.helpers.get(&index).cloned() else {
3153 return Err(format!("unknown external function {}", index));
3154 };
3155 debug_assert!(signature.args.len() <= 5);
3156 let mut next = self.next()?;
3157 for (index, arg) in signature.args.iter().enumerate() {
3158 let reg = (index + 1) as u8;
3159 self.reg(reg)?.match_parameter_type(self, arg, index, &mut next)?;
3160 }
3161 if signature.invalidate_array_bounds {
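3162 // Array bounds learned before the call no longer hold.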
3163 next.array_bounds.clear();
3164 }
3165 let value =
3166 self.resolve_return_value(context, &signature.return_value, &mut next, false)?;
3167 next.set_reg(0, value)?;
3168 for i in 1..=5 {
3169 next.set_reg(i, Type::default())?;
3170 }
3171 context.states.push(next);
3172 Ok(())
3173 }
3174
3175 fn exit<'a>(&mut self, context: &mut Self::Context<'a>) -> Result<(), String> {
3176 bpf_log!(self, context, "exit");
3177 if !self.reg(0)?.is_written_scalar() {
3178 return Err("register 0 is incorrect at exit time".to_string());
3179 }
3180 if !self.resources.is_empty() {
3181 return Err("some resources have not been released at exit time".to_string());
3182 }
3183 let this = self.clone();
3184 this.terminate(context)?;
3185 Ok(())
3187 }
3188
3189 fn jump<'a>(&mut self, context: &mut Self::Context<'a>, offset: i16) -> Result<(), String> {
3190 bpf_log!(self, context, "ja {}", offset);
3191 let parent = Some(Arc::new(self.clone()));
3192 context.states.push(self.jump_with_offset(offset, parent)?);
3193 Ok(())
3194 }
3195
3196 fn jeq<'a>(
3197 &mut self,
3198 context: &mut Self::Context<'a>,
3199 dst: Register,
3200 src: Source,
3201 offset: i16,
3202 ) -> Result<(), String> {
3203 self.conditional_jump(
3204 "jeq32",
3205 context,
3206 dst,
3207 src,
3208 offset,
3209 JumpType::Eq,
3210 JumpWidth::W32,
3211 |x, y| {
3212 comp32(x, y, |x, y| {
3213 if x.min == x.max && x.min == y.min && x.min == y.max {
3215 return Some(true);
3216 }
3217 if x.max < y.min || y.max < x.min {
3218 return Some(false);
3219 }
3220 None
3221 })
3222 },
3223 )
3224 }
3225 fn jeq64<'a>(
3226 &mut self,
3227 context: &mut Self::Context<'a>,
3228 dst: Register,
3229 src: Source,
3230 offset: i16,
3231 ) -> Result<(), String> {
3232 self.conditional_jump(
3233 "jeq",
3234 context,
3235 dst,
3236 src,
3237 offset,
3238 JumpType::Eq,
3239 JumpWidth::W64,
3240 |x, y| {
3241 comp64(x, y, |x, y| {
3242 if x.min == x.max && x.min == y.min && x.min == y.max {
3244 return Some(true);
3245 }
3246 if x.max < y.min || y.max < x.min {
3247 return Some(false);
3248 }
3249 None
3250 })
3251 },
3252 )
3253 }
3254 fn jne<'a>(
3255 &mut self,
3256 context: &mut Self::Context<'a>,
3257 dst: Register,
3258 src: Source,
3259 offset: i16,
3260 ) -> Result<(), String> {
3261 self.conditional_jump(
3262 "jne32",
3263 context,
3264 dst,
3265 src,
3266 offset,
3267 JumpType::Ne,
3268 JumpWidth::W32,
3269 |x, y| {
3270 comp32(x, y, |x, y| {
3271 if x.min == x.max && x.min == y.min && x.min == y.max {
3273 return Some(false);
3274 }
3275 if x.max < y.min || y.max < x.min {
3276 return Some(true);
3277 }
3278 None
3279 })
3280 },
3281 )
3282 }
3283 fn jne64<'a>(
3284 &mut self,
3285 context: &mut Self::Context<'a>,
3286 dst: Register,
3287 src: Source,
3288 offset: i16,
3289 ) -> Result<(), String> {
3290 self.conditional_jump(
3291 "jne",
3292 context,
3293 dst,
3294 src,
3295 offset,
3296 JumpType::Ne,
3297 JumpWidth::W64,
3298 |x, y| {
3299 comp64(x, y, |x, y| {
3300 if x.min == x.max && x.min == y.min && x.min == y.max {
3302 return Some(false);
3303 }
3304 if x.max < y.min || y.max < x.min {
3305 return Some(true);
3306 }
3307 None
3308 })
3309 },
3310 )
3311 }
3312 fn jge<'a>(
3313 &mut self,
3314 context: &mut Self::Context<'a>,
3315 dst: Register,
3316 src: Source,
3317 offset: i16,
3318 ) -> Result<(), String> {
3319 self.conditional_jump(
3320 "jge32",
3321 context,
3322 dst,
3323 src,
3324 offset,
3325 JumpType::Ge,
3326 JumpWidth::W32,
3327 |x, y| {
3328 comp32(x, y, |x, y| {
3329 if x.min >= y.max {
3331 return Some(true);
3332 }
3333 if y.min > x.max {
3334 return Some(false);
3335 }
3336 None
3337 })
3338 },
3339 )
3340 }
3341 fn jge64<'a>(
3342 &mut self,
3343 context: &mut Self::Context<'a>,
3344 dst: Register,
3345 src: Source,
3346 offset: i16,
3347 ) -> Result<(), String> {
3348 self.conditional_jump(
3349 "jge",
3350 context,
3351 dst,
3352 src,
3353 offset,
3354 JumpType::Ge,
3355 JumpWidth::W64,
3356 |x, y| {
3357 comp64(x, y, |x, y| {
3358 if x.min >= y.max {
3360 return Some(true);
3361 }
3362 if y.min > x.max {
3363 return Some(false);
3364 }
3365 None
3366 })
3367 },
3368 )
3369 }
3370 fn jgt<'a>(
3371 &mut self,
3372 context: &mut Self::Context<'a>,
3373 dst: Register,
3374 src: Source,
3375 offset: i16,
3376 ) -> Result<(), String> {
3377 self.conditional_jump(
3378 "jgt32",
3379 context,
3380 dst,
3381 src,
3382 offset,
3383 JumpType::Gt,
3384 JumpWidth::W32,
3385 |x, y| {
3386 comp32(x, y, |x, y| {
3387 if x.min > y.max {
3389 return Some(true);
3390 }
3391 if y.min >= x.max {
3392 return Some(false);
3393 }
3394 None
3395 })
3396 },
3397 )
3398 }
3399 fn jgt64<'a>(
3400 &mut self,
3401 context: &mut Self::Context<'a>,
3402 dst: Register,
3403 src: Source,
3404 offset: i16,
3405 ) -> Result<(), String> {
3406 self.conditional_jump(
3407 "jgt",
3408 context,
3409 dst,
3410 src,
3411 offset,
3412 JumpType::Gt,
3413 JumpWidth::W64,
3414 |x, y| {
3415 comp64(x, y, |x, y| {
3416 if x.min > y.max {
3418 return Some(true);
3419 }
3420 if y.min >= x.max {
3421 return Some(false);
3422 }
3423 None
3424 })
3425 },
3426 )
3427 }
3428 fn jle<'a>(
3429 &mut self,
3430 context: &mut Self::Context<'a>,
3431 dst: Register,
3432 src: Source,
3433 offset: i16,
3434 ) -> Result<(), String> {
3435 self.conditional_jump(
3436 "jle32",
3437 context,
3438 dst,
3439 src,
3440 offset,
3441 JumpType::Le,
3442 JumpWidth::W32,
3443 |x, y| {
3444 comp32(x, y, |x, y| {
3445 if x.max <= y.min {
3447 return Some(true);
3448 }
3449 if y.max < x.min {
3450 return Some(false);
3451 }
3452 None
3453 })
3454 },
3455 )
3456 }
3457 fn jle64<'a>(
3458 &mut self,
3459 context: &mut Self::Context<'a>,
3460 dst: Register,
3461 src: Source,
3462 offset: i16,
3463 ) -> Result<(), String> {
3464 self.conditional_jump(
3465 "jle",
3466 context,
3467 dst,
3468 src,
3469 offset,
3470 JumpType::Le,
3471 JumpWidth::W64,
3472 |x, y| {
3473 comp64(x, y, |x, y| {
3474 if x.max <= y.min {
3476 return Some(true);
3477 }
3478 if y.max < x.min {
3479 return Some(false);
3480 }
3481 None
3482 })
3483 },
3484 )
3485 }
3486 fn jlt<'a>(
3487 &mut self,
3488 context: &mut Self::Context<'a>,
3489 dst: Register,
3490 src: Source,
3491 offset: i16,
3492 ) -> Result<(), String> {
3493 self.conditional_jump(
3494 "jlt32",
3495 context,
3496 dst,
3497 src,
3498 offset,
3499 JumpType::Lt,
3500 JumpWidth::W32,
3501 |x, y| {
3502 comp32(x, y, |x, y| {
3503 if x.max < y.min {
3505 return Some(true);
3506 }
3507 if y.max <= x.min {
3508 return Some(false);
3509 }
3510 None
3511 })
3512 },
3513 )
3514 }
3515 fn jlt64<'a>(
3516 &mut self,
3517 context: &mut Self::Context<'a>,
3518 dst: Register,
3519 src: Source,
3520 offset: i16,
3521 ) -> Result<(), String> {
3522 self.conditional_jump(
3523 "jlt",
3524 context,
3525 dst,
3526 src,
3527 offset,
3528 JumpType::Lt,
3529 JumpWidth::W64,
3530 |x, y| {
3531 comp64(x, y, |x, y| {
3532 if x.max < y.min {
3534 return Some(true);
3535 }
3536 if y.max <= x.min {
3537 return Some(false);
3538 }
3539 None
3540 })
3541 },
3542 )
3543 }
3544 fn jsge<'a>(
3545 &mut self,
3546 context: &mut Self::Context<'a>,
3547 dst: Register,
3548 src: Source,
3549 offset: i16,
3550 ) -> Result<(), String> {
3551 self.conditional_jump(
3552 "jsge32",
3553 context,
3554 dst,
3555 src,
3556 offset,
3557 JumpType::LooseComparaison,
3558 JumpWidth::W32,
3559 |x, y| scomp32(x, y, |x, y| x >= y),
3560 )
3561 }
3562 fn jsge64<'a>(
3563 &mut self,
3564 context: &mut Self::Context<'a>,
3565 dst: Register,
3566 src: Source,
3567 offset: i16,
3568 ) -> Result<(), String> {
3569 self.conditional_jump(
3570 "jsge",
3571 context,
3572 dst,
3573 src,
3574 offset,
3575 JumpType::LooseComparaison,
3576 JumpWidth::W64,
3577 |x, y| scomp64(x, y, |x, y| x >= y),
3578 )
3579 }
3580 fn jsgt<'a>(
3581 &mut self,
3582 context: &mut Self::Context<'a>,
3583 dst: Register,
3584 src: Source,
3585 offset: i16,
3586 ) -> Result<(), String> {
3587 self.conditional_jump(
3588 "jsgt32",
3589 context,
3590 dst,
3591 src,
3592 offset,
3593 JumpType::StrictComparaison,
3594 JumpWidth::W32,
3595 |x, y| scomp32(x, y, |x, y| x > y),
3596 )
3597 }
3598 fn jsgt64<'a>(
3599 &mut self,
3600 context: &mut Self::Context<'a>,
3601 dst: Register,
3602 src: Source,
3603 offset: i16,
3604 ) -> Result<(), String> {
3605 self.conditional_jump(
3606 "jsgt",
3607 context,
3608 dst,
3609 src,
3610 offset,
3611 JumpType::StrictComparaison,
3612 JumpWidth::W64,
3613 |x, y| scomp64(x, y, |x, y| x > y),
3614 )
3615 }
3616 fn jsle<'a>(
3617 &mut self,
3618 context: &mut Self::Context<'a>,
3619 dst: Register,
3620 src: Source,
3621 offset: i16,
3622 ) -> Result<(), String> {
3623 self.conditional_jump(
3624 "jsle32",
3625 context,
3626 dst,
3627 src,
3628 offset,
3629 JumpType::LooseComparaison,
3630 JumpWidth::W32,
3631 |x, y| scomp32(x, y, |x, y| x <= y),
3632 )
3633 }
3634 fn jsle64<'a>(
3635 &mut self,
3636 context: &mut Self::Context<'a>,
3637 dst: Register,
3638 src: Source,
3639 offset: i16,
3640 ) -> Result<(), String> {
3641 self.conditional_jump(
3642 "jsle",
3643 context,
3644 dst,
3645 src,
3646 offset,
3647 JumpType::LooseComparaison,
3648 JumpWidth::W64,
3649 |x, y| scomp64(x, y, |x, y| x <= y),
3650 )
3651 }
3652 fn jslt<'a>(
3653 &mut self,
3654 context: &mut Self::Context<'a>,
3655 dst: Register,
3656 src: Source,
3657 offset: i16,
3658 ) -> Result<(), String> {
3659 self.conditional_jump(
3660 "jslt32",
3661 context,
3662 dst,
3663 src,
3664 offset,
3665 JumpType::StrictComparaison,
3666 JumpWidth::W32,
3667 |x, y| scomp32(x, y, |x, y| x < y),
3668 )
3669 }
3670 fn jslt64<'a>(
3671 &mut self,
3672 context: &mut Self::Context<'a>,
3673 dst: Register,
3674 src: Source,
3675 offset: i16,
3676 ) -> Result<(), String> {
3677 self.conditional_jump(
3678 "jslt",
3679 context,
3680 dst,
3681 src,
3682 offset,
3683 JumpType::StrictComparaison,
3684 JumpWidth::W64,
3685 |x, y| scomp64(x, y, |x, y| x < y),
3686 )
3687 }
3688 fn jset<'a>(
3689 &mut self,
3690 context: &mut Self::Context<'a>,
3691 dst: Register,
3692 src: Source,
3693 offset: i16,
3694 ) -> Result<(), String> {
3695 self.conditional_jump(
3696 "jset32",
3697 context,
3698 dst,
3699 src,
3700 offset,
3701 JumpType::Unknown,
3702 JumpWidth::W32,
3703 |x, y| {
3704 comp32(x, y, |x, y| {
3705 if x.min != x.max || y.min != y.max {
3707 return None;
3708 }
3709 Some(x.min & y.min != 0)
3710 })
3711 },
3712 )
3713 }
3714 fn jset64<'a>(
3715 &mut self,
3716 context: &mut Self::Context<'a>,
3717 dst: Register,
3718 src: Source,
3719 offset: i16,
3720 ) -> Result<(), String> {
3721 self.conditional_jump(
3722 "jset",
3723 context,
3724 dst,
3725 src,
3726 offset,
3727 JumpType::Unknown,
3728 JumpWidth::W64,
3729 |x, y| {
3730 comp64(x, y, |x, y| {
3731 if x.min != x.max || y.min != y.max {
3733 return None;
3734 }
3735 Some(x.min & y.min != 0)
3736 })
3737 },
3738 )
3739 }
3740
3741 fn atomic_add<'a>(
3742 &mut self,
3743 context: &mut Self::Context<'a>,
3744 fetch: bool,
3745 dst: Register,
3746 offset: i16,
3747 src: Register,
3748 ) -> Result<(), String> {
3749 self.atomic_operation(
3750 "add32",
3751 context,
3752 DataWidth::U32,
3753 fetch,
3754 dst,
3755 offset,
3756 src,
3757 AluType::Add,
3758 |x, y| alu32(x, y, |x, y| x + y),
3759 )
3760 }
3761
3762 fn atomic_add64<'a>(
3763 &mut self,
3764 context: &mut Self::Context<'a>,
3765 fetch: bool,
3766 dst: Register,
3767 offset: i16,
3768 src: Register,
3769 ) -> Result<(), String> {
3770 self.atomic_operation(
3771 "add",
3772 context,
3773 DataWidth::U64,
3774 fetch,
3775 dst,
3776 offset,
3777 src,
3778 AluType::Add,
3779 |x, y| x + y,
3780 )
3781 }
3782
3783 fn atomic_and<'a>(
3784 &mut self,
3785 context: &mut Self::Context<'a>,
3786 fetch: bool,
3787 dst: Register,
3788 offset: i16,
3789 src: Register,
3790 ) -> Result<(), String> {
3791 self.atomic_operation(
3792 "and32",
3793 context,
3794 DataWidth::U32,
3795 fetch,
3796 dst,
3797 offset,
3798 src,
3799 AluType::Plain,
3800 |x, y| alu32(x, y, |x, y| x & y),
3801 )
3802 }
3803
3804 fn atomic_and64<'a>(
3805 &mut self,
3806 context: &mut Self::Context<'a>,
3807 fetch: bool,
3808 dst: Register,
3809 offset: i16,
3810 src: Register,
3811 ) -> Result<(), String> {
3812 self.atomic_operation(
3813 "and",
3814 context,
3815 DataWidth::U64,
3816 fetch,
3817 dst,
3818 offset,
3819 src,
3820 AluType::Plain,
3821 |x, y| x & y,
3822 )
3823 }
3824
3825 fn atomic_or<'a>(
3826 &mut self,
3827 context: &mut Self::Context<'a>,
3828 fetch: bool,
3829 dst: Register,
3830 offset: i16,
3831 src: Register,
3832 ) -> Result<(), String> {
3833 self.atomic_operation(
3834 "or32",
3835 context,
3836 DataWidth::U32,
3837 fetch,
3838 dst,
3839 offset,
3840 src,
3841 AluType::Plain,
3842 |x, y| alu32(x, y, |x, y| x | y),
3843 )
3844 }
3845
3846 fn atomic_or64<'a>(
3847 &mut self,
3848 context: &mut Self::Context<'a>,
3849 fetch: bool,
3850 dst: Register,
3851 offset: i16,
3852 src: Register,
3853 ) -> Result<(), String> {
3854 self.atomic_operation(
3855 "or",
3856 context,
3857 DataWidth::U64,
3858 fetch,
3859 dst,
3860 offset,
3861 src,
3862 AluType::Plain,
3863 |x, y| x | y,
3864 )
3865 }
3866
3867 fn atomic_xor<'a>(
3868 &mut self,
3869 context: &mut Self::Context<'a>,
3870 fetch: bool,
3871 dst: Register,
3872 offset: i16,
3873 src: Register,
3874 ) -> Result<(), String> {
3875 self.atomic_operation(
3876 "xor32",
3877 context,
3878 DataWidth::U32,
3879 fetch,
3880 dst,
3881 offset,
3882 src,
3883 AluType::Plain,
3884 |x, y| alu32(x, y, |x, y| x ^ y),
3885 )
3886 }
3887
3888 fn atomic_xor64<'a>(
3889 &mut self,
3890 context: &mut Self::Context<'a>,
3891 fetch: bool,
3892 dst: Register,
3893 offset: i16,
3894 src: Register,
3895 ) -> Result<(), String> {
3896 self.atomic_operation(
3897 "xor",
3898 context,
3899 DataWidth::U64,
3900 fetch,
3901 dst,
3902 offset,
3903 src,
3904 AluType::Plain,
3905 |x, y| x ^ y,
3906 )
3907 }
3908
3909 fn atomic_xchg<'a>(
3910 &mut self,
3911 context: &mut Self::Context<'a>,
3912 fetch: bool,
3913 dst: Register,
3914 offset: i16,
3915 src: Register,
3916 ) -> Result<(), String> {
3917 self.atomic_operation(
3918 "xchg32",
3919 context,
3920 DataWidth::U32,
3921 fetch,
3922 dst,
3923 offset,
3924 src,
3925 AluType::Plain,
3926 |_, x| x,
3927 )
3928 }
3929
3930 fn atomic_xchg64<'a>(
3931 &mut self,
3932 context: &mut Self::Context<'a>,
3933 fetch: bool,
3934 dst: Register,
3935 offset: i16,
3936 src: Register,
3937 ) -> Result<(), String> {
3938 self.raw_atomic_operation(
3939 "xchg",
3940 context,
3941 DataWidth::U64,
3942 fetch,
3943 dst,
3944 offset,
3945 src,
3946 |_, _, x| Ok(x),
3947 )
3948 }
3949
3950 fn atomic_cmpxchg<'a>(
3951 &mut self,
3952 context: &mut Self::Context<'a>,
3953 dst: Register,
3954 offset: i16,
3955 src: Register,
3956 ) -> Result<(), String> {
3957 self.raw_atomic_cmpxchg("cmpxchg32", context, dst, offset, src, JumpWidth::W32, |x, y| {
3958 comp32(x, y, |x, y| {
3959 if x.min == x.max && x.min == y.min && x.min == y.max {
3961 return Some(true);
3962 }
3963 if x.max < y.min || y.max < x.min {
3964 return Some(false);
3965 }
3966 None
3967 })
3968 })
3969 }
3970
3971 fn atomic_cmpxchg64<'a>(
3972 &mut self,
3973 context: &mut Self::Context<'a>,
3974 dst: Register,
3975 offset: i16,
3976 src: Register,
3977 ) -> Result<(), String> {
3978 self.raw_atomic_cmpxchg("cmpxchg", context, dst, offset, src, JumpWidth::W64, |x, y| {
3979 comp64(x, y, |x, y| {
3980 if x.min == x.max && x.min == y.min && x.min == y.max {
3982 return Some(true);
3983 }
3984 if x.max < y.min || y.max < x.min {
3985 return Some(false);
3986 }
3987 None
3988 })
3989 })
3990 }
3991
3992 fn load<'a>(
3993 &mut self,
3994 context: &mut Self::Context<'a>,
3995 dst: Register,
3996 offset: i16,
3997 src: Register,
3998 width: DataWidth,
3999 ) -> Result<(), String> {
4000 bpf_log!(
4001 self,
4002 context,
4003 "ldx{} {}, [{}{}]",
4004 width.str(),
4005 display_register(dst),
4006 display_register(src),
4007 print_offset(offset)
4008 );
4009 let addr = self.reg(src)?;
4010 let loaded_type = self.load_memory(context, &addr, Field::new(offset, width))?;
4011 let mut next = self.next()?;
4012 next.set_reg(dst, loaded_type)?;
4013 context.states.push(next);
4014 Ok(())
4015 }
4016
4017 fn load64<'a>(
4018 &mut self,
4019 context: &mut Self::Context<'a>,
4020 dst: Register,
4021 src: u8,
4022 lower: u32,
4023 ) -> Result<(), String> {
4024 let Some(next_instruction) = context.code.get(self.pc + 1) else {
4025 return Err("incomplete lddw".to_string());
4026 };
4027 if next_instruction.src_reg() != 0 || next_instruction.dst_reg() != 0 {
4028 return Err("invalid lddw".to_string());
4029 }
4030
4031 let value = match src {
4032 0 => {
4033 let value = (lower as u64) | (((next_instruction.imm() as u32) as u64) << 32);
4034 bpf_log!(self, context, "lddw {}, 0x{:x}", display_register(dst), value);
4035 Type::from(value)
4036 }
4037 BPF_PSEUDO_MAP_IDX => {
4038 let map_index = lower;
4039 bpf_log!(
4040 self,
4041 context,
4042 "lddw {}, map_by_index({:x})",
4043 display_register(dst),
4044 map_index
4045 );
4046 context
4047 .calling_context
4048 .maps
4049 .get(usize::try_from(map_index).unwrap())
4050 .map(|schema| Type::ConstPtrToMap { id: map_index.into(), schema: *schema })
4051 .ok_or_else(|| format!("lddw with invalid map index: {}", map_index))?
4052 }
4053 BPF_PSEUDO_MAP_IDX_VALUE => {
4054 let map_index = lower;
4055 let offset = next_instruction.imm();
4056 bpf_log!(
4057 self,
4058 context,
4059 "lddw {}, map_value_by_index({:x})+{offset}",
4060 display_register(dst),
4061 map_index
4062 );
4063 let id = context.next_id();
4064 let map_schema = context
4065 .calling_context
4066 .maps
4067 .get(usize::try_from(map_index).unwrap())
4068 .ok_or_else(|| format!("lddw with invalid map index: {}", map_index))?;
4069
4070 if map_schema.map_type != bpf_map_type_BPF_MAP_TYPE_ARRAY {
4071 return Err(format!(
4072 "Invalid map type at index {map_index} for lddw. Expecting array."
4073 ));
4074 }
4075 if map_schema.max_entries == 0 {
4076 return Err(format!("Array has no entry."));
4077 }
4078
4079 Type::PtrToMemory {
4080 id: MemoryId::from(id),
4081 offset: offset.into(),
4082 buffer_size: map_schema.value_size.into(),
4083 }
4084 }
4085 _ => {
4086 return Err("invalid lddw".to_string());
4087 }
4088 };
4089
4090 let parent = Some(Arc::new(self.clone()));
4091 let mut next = self.jump_with_offset(1, parent)?;
4092 next.set_reg(dst, value.into())?;
4093
4094 context.states.push(next);
4095 Ok(())
4096 }
4097
4098 fn load_from_packet<'a>(
4099 &mut self,
4100 context: &mut Self::Context<'a>,
4101 dst: Register,
4102 src: Register,
4103 offset: i32,
4104 register_offset: Option<Register>,
4105 width: DataWidth,
4106 ) -> Result<(), String> {
4107 bpf_log!(
4108 self,
4109 context,
4110 "ldp{} {}{}",
4111 width.str(),
4112 register_offset.map(display_register).unwrap_or_default(),
4113 print_offset(offset)
4114 );
4115
4116 let src_type = self.reg(src)?;
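4117 // Legacy packet loads only apply to the program's packet pointer.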
4118 let src_is_packet = match &context.calling_context.packet_type {
4119 Some(packet_type) => src_type == *packet_type,
4120 None => false,
4121 };
4122 if !src_is_packet {
4123 return Err(format!("R{} is not a packet", src));
4124 }
4125
4126 if let Some(reg) = register_offset {
4127 let reg = self.reg(reg)?;
4128 if !reg.is_written_scalar() {
4129 return Err("access to unwritten offset".to_string());
4130 }
4131 }
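4132 // The destination receives an unknown scalar and r1-r5 are clobbered, as for a call.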
4132 let mut next = self.next()?;
4134 next.set_reg(dst, Type::UNKNOWN_SCALAR)?;
4135 for i in 1..=5 {
4136 next.set_reg(i, Type::default())?;
4137 }
4138 context.states.push(next);
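4140 // Like exit, a packet load can terminate the program, so the same invariants apply.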
4139 if !self.reg(0)?.is_written_scalar() {
4141 return Err("register 0 is incorrect at exit time".to_string());
4142 }
4143 if !self.resources.is_empty() {
4144 return Err("some resources have not been released at exit time".to_string());
4145 }
4146 let this = self.clone();
4147 this.terminate(context)?;
4148 Ok(())
4150 }
4151
4152 fn store<'a>(
4153 &mut self,
4154 context: &mut Self::Context<'a>,
4155 dst: Register,
4156 offset: i16,
4157 src: Source,
4158 width: DataWidth,
4159 ) -> Result<(), String> {
4160 let value = match src {
4161 Source::Reg(r) => {
4162 bpf_log!(
4163 self,
4164 context,
4165 "stx{} [{}{}], {}",
4166 width.str(),
4167 display_register(dst),
4168 print_offset(offset),
4169 display_register(r),
4170 );
4171 self.reg(r)?
4172 }
4173 Source::Value(v) => {
4174 bpf_log!(
4175 self,
4176 context,
4177 "st{} [{}{}], 0x{:x}",
4178 width.str(),
4179 display_register(dst),
4180 print_offset(offset),
4181 v,
4182 );
4183 Type::from(v & Type::mask(width))
4184 }
4185 };
4186 let mut next = self.next()?;
4187 let addr = self.reg(dst)?;
4188 next.store_memory(context, &addr, Field::new(offset, width), value)?;
4189 context.states.push(next);
4190 Ok(())
4191 }
4192}
4193
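/// Applies a 32-bit ALU operation: the operands are truncated to 32 bits and
/// the result is widened back to 64 bits.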
4194fn alu32(
4195 x: ScalarValueData,
4196 y: ScalarValueData,
4197 op: impl FnOnce(U32ScalarValueData, U32ScalarValueData) -> U32ScalarValueData,
4198) -> ScalarValueData {
4199 op(U32ScalarValueData::from(x), U32ScalarValueData::from(y)).into()
4200}
4201
4202fn comp64(
4203 x: ScalarValueData,
4204 y: ScalarValueData,
4205 op: impl FnOnce(U64Range, U64Range) -> Option<bool>,
4206) -> Result<Option<bool>, ()> {
4207 if !x.is_fully_initialized() || !y.is_fully_initialized() {
4208 return Err(());
4209 }
4210 Ok(op(x.urange, y.urange))
4211}
4212
4213fn comp32(
4214 x: ScalarValueData,
4215 y: ScalarValueData,
4216 op: impl FnOnce(U32Range, U32Range) -> Option<bool>,
4217) -> Result<Option<bool>, ()> {
4218 let x = U32ScalarValueData::from(x);
4219 let y = U32ScalarValueData::from(y);
4220 if !x.is_fully_initialized() || !y.is_fully_initialized() {
4221 return Err(());
4222 }
4223 Ok(op(x.urange, y.urange))
4224}
4225
4226fn scomp64(
4227 x: ScalarValueData,
4228 y: ScalarValueData,
4229 op: impl FnOnce(i64, i64) -> bool,
4230) -> Result<Option<bool>, ()> {
4231 if !x.is_fully_initialized() || !y.is_fully_initialized() {
4232 return Err(());
4233 }
4234 if !x.is_known() || !y.is_known() {
4235 return Ok(None);
4236 }
4237 Ok(Some(op(x.value as i64, y.value as i64)))
4238}
4239
4240fn scomp32(
4241 x: ScalarValueData,
4242 y: ScalarValueData,
4243 op: impl FnOnce(i32, i32) -> bool,
4244) -> Result<Option<bool>, ()> {
4245 let x = U32ScalarValueData::from(x);
4246 let y = U32ScalarValueData::from(y);
4247 if !x.is_fully_initialized() || !y.is_fully_initialized() {
4248 return Err(());
4249 }
4250 if !x.is_known() || !y.is_known() {
4251 return Ok(None);
4252 }
4253 Ok(Some(op(x.value as i32, y.value as i32)))
4254}
4255
4256fn print_offset<T: Into<i32>>(offset: T) -> String {
4257 let offset: i32 = offset.into();
4258 if offset == 0 {
4259 String::new()
4260 } else if offset > 0 {
4261 format!("+{offset}")
4262 } else {
4263 format!("{offset}")
4264 }
4265}
4266
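/// Applies `f` to the scalar value backing a stack offset.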
4267fn run_on_stack_offset<F>(v: StackOffset, f: F) -> StackOffset
4268where
4269 F: FnOnce(ScalarValueData) -> ScalarValueData,
4270{
4271 StackOffset(f(v.reg()))
4272}
4273
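/// Logs `msg` to the verifier logger and returns it as a program verification error.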
4274fn error_and_log<T>(
4275 logger: &mut dyn VerifierLogger,
4276 msg: impl std::string::ToString,
4277) -> Result<T, EbpfError> {
4278 let msg = msg.to_string();
4279 logger.log(msg.as_bytes());
4280 Err(EbpfError::ProgramVerifyError(msg))
4281}
4282
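/// Combines two ordering observations: identical orderings combine to
/// themselves, `Equal` is neutral, and contradictory orderings are
/// incomparable (`None`).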
4283fn associate_orderings(o1: Ordering, o2: Ordering) -> Option<Ordering> {
4284 match (o1, o2) {
4285 (o1, o2) if o1 == o2 => Some(o1),
4286 (o, Ordering::Equal) | (Ordering::Equal, o) => Some(o),
4287 _ => None,
4288 }
4289}
4290
4291#[cfg(test)]
4292mod tests {
4293 use super::*;
4294 use std::collections::BTreeSet;
4295 use test_util::{assert_geq, assert_leq};
4296
4297 #[test]
4298 fn test_type_ordering() {
4299 let t0 = Type::from(0);
4300 let t1 = Type::from(1);
4301 let random = Type::AliasParameter { parameter_index: 8 };
4302 let unknown_written = Type::UNKNOWN_SCALAR;
4303 let unwritten = Type::default();
4304
4305 assert_eq!(t0.partial_cmp(&t0), Some(Ordering::Equal));
4306 assert_eq!(t0.partial_cmp(&t1), None);
4307 assert_eq!(t0.partial_cmp(&random), None);
4308 assert_eq!(t0.partial_cmp(&unknown_written), Some(Ordering::Less));
4309 assert_eq!(t0.partial_cmp(&unwritten), Some(Ordering::Less));
4310
4311 assert_eq!(t1.partial_cmp(&t0), None);
4312 assert_eq!(t1.partial_cmp(&t1), Some(Ordering::Equal));
4313 assert_eq!(t1.partial_cmp(&random), None);
4314 assert_eq!(t1.partial_cmp(&unknown_written), Some(Ordering::Less));
4315 assert_eq!(t1.partial_cmp(&unwritten), Some(Ordering::Less));
4316
4317 assert_eq!(random.partial_cmp(&t0), None);
4318 assert_eq!(random.partial_cmp(&t1), None);
4319 assert_eq!(random.partial_cmp(&random), Some(Ordering::Equal));
4320 assert_eq!(random.partial_cmp(&unknown_written), None);
4321 assert_eq!(random.partial_cmp(&unwritten), Some(Ordering::Less));
4322
4323 assert_eq!(unknown_written.partial_cmp(&t0), Some(Ordering::Greater));
4324 assert_eq!(unknown_written.partial_cmp(&t1), Some(Ordering::Greater));
4325 assert_eq!(unknown_written.partial_cmp(&random), None);
4326 assert_eq!(unknown_written.partial_cmp(&unknown_written), Some(Ordering::Equal));
4327 assert_eq!(unknown_written.partial_cmp(&unwritten), Some(Ordering::Less));
4328
4329 assert_eq!(unwritten.partial_cmp(&t0), Some(Ordering::Greater));
4330 assert_eq!(unwritten.partial_cmp(&t1), Some(Ordering::Greater));
4331 assert_eq!(unwritten.partial_cmp(&random), Some(Ordering::Greater));
4332 assert_eq!(unwritten.partial_cmp(&unknown_written), Some(Ordering::Greater));
4333 assert_eq!(unwritten.partial_cmp(&unwritten), Some(Ordering::Equal));
4334 }
4335
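// Sanity checks for the log display helpers `display_register`,
// `display_source` and `print_offset` defined above.
#[test]
fn test_display_helpers() {
assert_eq!(display_register(3), "%r3");
assert_eq!(display_source(Source::Reg(1)), "%r1");
assert_eq!(display_source(Source::Value(16)), "0x10");
assert_eq!(print_offset(0), "");
assert_eq!(print_offset(5), "+5");
assert_eq!(print_offset(-3), "-3");
}
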
4336 #[test]
4337 fn test_stack_ordering() {
4338 let mut s1 = Stack::default();
4339 let mut s2 = Stack::default();
4340
4341 assert_eq!(s1.partial_cmp(&s2), Some(Ordering::Equal));
4342 s1.set(0, 0.into());
4343 assert_eq!(s1.partial_cmp(&s2), Some(Ordering::Less));
4344 assert_eq!(s2.partial_cmp(&s1), Some(Ordering::Greater));
4345 s2.set(1, 1.into());
4346 assert_eq!(s1.partial_cmp(&s2), None);
4347 assert_eq!(s2.partial_cmp(&s1), None);
4348 }
4349
4350 #[test]
4351 fn test_context_ordering() {
4352 let mut c1 = ComputationContext::default();
4353 let mut c2 = ComputationContext::default();
4354
4355 assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Equal));
4356
4357 c1.array_bounds.insert(1.into(), 5);
4358 assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Less));
4359 assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Greater));
4360
4361 c2.array_bounds.insert(1.into(), 7);
4362 assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
4363 assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));
4364
4365 c1.array_bounds.insert(2.into(), 9);
4366 assert_eq!(c1.partial_cmp(&c2), None);
4367 assert_eq!(c2.partial_cmp(&c1), None);
4368
4369 c2.array_bounds.insert(2.into(), 9);
4370 assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
4371 assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));
4372
4373 c2.array_bounds.insert(3.into(), 12);
4374 assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
4375 assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));
4376
4377 c1.pc = 8;
4378 assert_eq!(c1.partial_cmp(&c2), None);
4379 assert_eq!(c2.partial_cmp(&c1), None);
4380 }
4381
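// A short check of `associate_orderings`, which the orderings above rely on:
// compatible observations combine, contradictory ones do not.
#[test]
fn test_associate_orderings() {
assert_eq!(associate_orderings(Ordering::Less, Ordering::Less), Some(Ordering::Less));
assert_eq!(associate_orderings(Ordering::Less, Ordering::Equal), Some(Ordering::Less));
assert_eq!(associate_orderings(Ordering::Equal, Ordering::Greater), Some(Ordering::Greater));
assert_eq!(associate_orderings(Ordering::Less, Ordering::Greater), None);
}
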
4382 #[test]
4383 fn test_stack_access() {
4384 let mut s = Stack::default();
4385
4386 assert!(s.store(StackOffset(8.into()), Type::UNKNOWN_SCALAR, DataWidth::U64).is_ok());
4389 assert!(s.store(StackOffset(16.into()), Type::UNKNOWN_SCALAR, DataWidth::U64).is_ok());
4390 assert!(s.store(StackOffset(24.into()), Type::UNKNOWN_SCALAR, DataWidth::U16).is_ok());
4391
4392 for offset in 0..32 {
4393 for end in (offset + 1)..32 {
4394 assert_eq!(
4395 s.read_data_ptr(2, StackOffset(offset.into()), (end - offset) as u64).is_ok(),
4396 offset >= 8 && end <= 26
4397 );
4398 }
4399 }
4400
4401 assert!(s.read_data_ptr(2, StackOffset(12.into()), u64::MAX - 2).is_err());
4403 }
4404
4405 #[test]
4406 fn test_compute_range_for_bytes_swap() {
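4407 // Exhaustively checks that the range computed for a one-byte store always
4408 // contains the concrete result. `values` holds boundary values used both as
4409 // operands and as range endpoints.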
4407 let mut values = BTreeSet::<u64>::default();
4410 for v1 in &[0x00, 0x1, u64::MAX] {
4411 for v2 in &[0x00, 0x1, u64::MAX] {
4412 for v3 in &[0x00, 0x1, u64::MAX] {
4413 values.insert(U64Range::assemble_slices((*v1, *v2, *v3), 1, 1));
4414 }
4415 }
4416 }
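4418 // Simulates storing the low byte of `new` into byte 1 of `old`.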
4417 let store = |old: u64, new: u64| (old & !0xff00) | ((new & 0xff) << 8);
4419
4420 for old in &values {
4421 for new in &values {
4422 let s = store(*old, *new);
4423 for min_old in values.iter().filter(|v| *v <= old) {
4424 for min_new in values.iter().filter(|v| *v <= new) {
4425 for max_old in values.iter().filter(|v| *v >= old) {
4426 for max_new in values.iter().filter(|v| *v >= new) {
4427 let range = U64Range::compute_range_for_bytes_swap(
4428 U64Range::new(*min_old, *max_old),
4429 U64Range::new(*min_new, *max_new),
4430 1,
4431 0,
4432 1,
4433 );
4434 assert_leq!(range.min, s);
4435 assert_geq!(range.max, s);
4436 }
4437 }
4438 }
4439 }
4440 }
4441 }
4442 }
4443}