1#![allow(unused_variables)]
6
7use crate::scalar_value::{ScalarValueData, U32Range, U32ScalarValueData, U64Range};
8use crate::visitor::{BpfVisitor, ProgramCounter, Register, Source};
9use crate::{
10 BPF_MAX_INSTS, BPF_PSEUDO_MAP_IDX, BPF_PSEUDO_MAP_IDX_VALUE, BPF_STACK_SIZE, DataWidth,
11 EbpfError, EbpfInstruction, GENERAL_REGISTER_COUNT, MapSchema, REGISTER_COUNT,
12};
13use byteorder::{BigEndian, ByteOrder, LittleEndian, NativeEndian};
14use fuchsia_sync::Mutex;
15use linux_uapi::{bpf_map_type, bpf_map_type_BPF_MAP_TYPE_ARRAY};
16use std::cmp::Ordering;
17use std::collections::{BTreeMap, HashMap, HashSet};
18use std::sync::Arc;
19use zerocopy::IntoBytes;
20
/// `u32::MAX` widened to `u64`; used for 32-bit masking and range checks.
const U32_MAX: u64 = u32::MAX as u64;
22
/// Sink for diagnostic lines emitted while verifying a program.
pub trait VerifierLogger {
    /// Logs one line. Lines are expected to be ASCII (see the debug
    /// assertion in `NullVerifierLogger`).
    fn log(&mut self, line: &[u8]);
}
28
/// A `VerifierLogger` that discards every line.
pub struct NullVerifierLogger;

impl VerifierLogger for NullVerifierLogger {
    fn log(&mut self, line: &[u8]) {
        // Even though the line is dropped, keep checking (in debug builds)
        // the invariant that log lines are ASCII.
        debug_assert!(line.is_ascii());
    }
}
37
/// Identifies a memory region tracked by the verifier. An id can carry a
/// chain of parent ids, used to relate derived regions (e.g. releasable
/// resources) to the region they originate from — see `prepended`,
/// `has_parent` and `matches`.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct MemoryId {
    // Raw numeric identifier.
    id: u64,
    // Direct parent in the chain, if any.
    parent: Option<Box<MemoryId>>,
}
46
impl MemoryId {
    /// Returns the raw numeric id (ignoring any parent chain).
    pub fn id(&self) -> u64 {
        self.id
    }
}

impl From<u64> for MemoryId {
    /// Builds a parentless id from a raw value.
    fn from(id: u64) -> Self {
        Self { id, parent: None }
    }
}
58
// Source of fresh ids for `MemoryId::new()`. It starts at `u64::MAX / 2`,
// presumably to keep generated ids disjoint from ids assigned elsewhere
// (e.g. via `from_raw`) — TODO confirm with the callers of `from_raw`.
static BPF_TYPE_IDENTIFIER_COUNTER: std::sync::atomic::AtomicU64 =
    std::sync::atomic::AtomicU64::new(u64::MAX / 2);
64
impl MemoryId {
    /// Builds a parentless id from a raw value; usable in `const` contexts.
    pub const fn from_raw(id: u64) -> MemoryId {
        Self { id, parent: None }
    }

    /// Allocates a fresh id, unique across the process.
    pub fn new() -> MemoryId {
        Self {
            id: BPF_TYPE_IDENTIFIER_COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
            parent: None,
        }
    }

    /// Returns a copy of `self` with `other` appended at the far end of the
    /// parent chain (i.e. as the most distant ancestor).
    fn prepended(&self, other: MemoryId) -> Self {
        match &self.parent {
            None => MemoryId { id: self.id, parent: Some(Box::new(other)) },
            Some(parent) => {
                MemoryId { id: self.id, parent: Some(Box::new(parent.prepended(other))) }
            }
        }
    }

    /// Returns whether `parent` is the *direct* parent of `self`.
    fn has_parent(&self, parent: &MemoryId) -> bool {
        match &self.parent {
            None => false,
            Some(p) => p.as_ref() == parent,
        }
    }

    /// Returns whether the two ids are compatible: the raw ids must be
    /// equal, and when both sides carry a parent the parents must match
    /// recursively. A missing parent on either side matches anything.
    fn matches(&self, other: &MemoryId) -> bool {
        if self.id != other.id {
            return false;
        };
        match (&self.parent, &other.parent) {
            (Some(p1), Some(p2)) => p1.matches(p2.as_ref()),
            _ => true,
        }
    }
}
107
/// Describes how a field of a `StructDescriptor` may be accessed.
#[derive(Clone, Debug, PartialEq)]
pub enum FieldType {
    /// A scalar of `size` bytes (by contrast with `MutableScalar`,
    /// presumably not writable — confirm against the store path, which is
    /// outside this view).
    Scalar { size: usize },

    /// A writable scalar of `size` bytes.
    MutableScalar { size: usize },

    /// A pointer to a memory region of `buffer_size` bytes. `is_32_bit`
    /// selects whether the pointer field itself occupies 4 or 8 bytes.
    PtrToMemory { is_32_bit: bool, id: MemoryId, buffer_size: usize },

    /// Like `PtrToMemory`, but the pointer may also be null.
    NullablePtrToMemory { is_32_bit: bool, id: MemoryId, buffer_size: usize },

    /// A pointer to the start of an array whose length is only known at
    /// runtime; bounds are proven by comparing against the matching
    /// `PtrToEndArray` field (see `Type::constraint`).
    PtrToArray { is_32_bit: bool, id: MemoryId },

    /// A pointer one past the end of the array identified by `id`.
    PtrToEndArray { is_32_bit: bool, id: MemoryId },
}
131
/// One field of a `StructDescriptor`.
#[derive(Clone, Debug, PartialEq)]
pub struct FieldDescriptor {
    /// Byte offset of the field inside the struct.
    pub offset: usize,

    /// Access rules for the field.
    pub field_type: FieldType,
}
142
143impl FieldDescriptor {
144 fn size(&self) -> usize {
145 match self.field_type {
146 FieldType::Scalar { size } | FieldType::MutableScalar { size } => size,
147 FieldType::PtrToMemory { is_32_bit, .. }
148 | FieldType::NullablePtrToMemory { is_32_bit, .. }
149 | FieldType::PtrToArray { is_32_bit, .. }
150 | FieldType::PtrToEndArray { is_32_bit, .. } => {
151 if is_32_bit {
152 4
153 } else {
154 8
155 }
156 }
157 }
158 }
159}
160
/// An (offset, width) pair describing one memory access, as encoded in a
/// load/store instruction.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct Field {
    offset: i16,
    width: DataWidth,
}

impl Field {
    fn new(offset: i16, width: DataWidth) -> Self {
        Self { offset, width }
    }
}
173
/// Describes the memory layout of a struct reachable from the program,
/// listing which fields may be accessed and how.
#[derive(Debug, PartialEq, Default)]
pub struct StructDescriptor {
    /// The accessible fields; accesses that fall outside every listed field
    /// are rejected (see `find_field`).
    pub fields: Vec<FieldDescriptor>,
}
180
181impl StructDescriptor {
182 fn is_subtype(&self, super_struct: &StructDescriptor) -> bool {
184 for super_field in super_struct.fields.iter() {
186 if self.fields.iter().find(|field| *field == super_field).is_none() {
187 return false;
188 }
189 }
190 true
191 }
192
    /// Resolves the access at `base_offset + field.offset` (of width
    /// `field.width`) to the descriptor of the field it touches.
    ///
    /// Returns `None` — i.e. rejects the access — when the accessed range is
    /// not fully contained in a single field, or when a pointer field is
    /// accessed with the wrong width, at a non-exact offset, or at a
    /// statically unknown offset.
    fn find_field(&self, base_offset: ScalarValueData, field: Field) -> Option<&FieldDescriptor> {
        let offset = base_offset + field.offset;
        // The whole possible range of `offset` must fall inside one field.
        let field_desc = self.fields.iter().find(|f| {
            f.offset <= offset.min() as usize && (offset.max() as usize) < f.offset + f.size()
        })?;
        let is_valid_load = match field_desc.field_type {
            FieldType::Scalar { size } | FieldType::MutableScalar { size } => {
                // Scalars allow any access that stays within the field.
                ((offset + field.width.bytes()).max() as usize) <= field_desc.offset + size
            }
            FieldType::PtrToMemory { is_32_bit, .. }
            | FieldType::NullablePtrToMemory { is_32_bit, .. }
            | FieldType::PtrToArray { is_32_bit, .. }
            | FieldType::PtrToEndArray { is_32_bit, .. } => {
                // Pointer fields must be accessed whole: known, exact offset
                // and the exact pointer width.
                let expected_width = if is_32_bit { DataWidth::U32 } else { DataWidth::U64 };
                offset.is_known()
                    && offset.value as usize == field_desc.offset
                    && field.width == expected_width
            }
        };

        is_valid_load.then_some(field_desc)
    }
219}
220
/// How the size of a helper's memory-buffer parameter is determined.
#[derive(Clone, Debug, PartialEq)]
pub enum MemoryParameterSize {
    /// The size is a fixed number of bytes.
    Value(u64),
    /// The size is read from the helper parameter at `index` (0-based).
    Reference { index: u8 },
}
228
impl MemoryParameterSize {
    /// Resolves the buffer size in bytes. For `Reference`, the parameter at
    /// `index` is read from register `index + 1` (helper arguments start at
    /// r1) and must be a scalar with a statically known value.
    fn size(&self, context: &ComputationContext) -> Result<u64, String> {
        match self {
            Self::Value(size) => Ok(*size),
            Self::Reference { index } => {
                let size_type = context.reg(index + 1)?;
                match size_type {
                    Type::ScalarValue(data) if data.is_known() => Ok(data.value),
                    _ => Err("cannot know buffer size".to_string()),
                }
            }
        }
    }
}
243
/// Restricts which map types a helper parameter accepts.
#[derive(Clone, Debug, PartialEq)]
pub enum MapTypeFilter {
    /// Only the listed map types are accepted.
    AllowList(&'static [bpf_map_type]),
    /// Every map type except the listed ones is accepted.
    DenyList(&'static [bpf_map_type]),
}
249
250impl MapTypeFilter {
251 pub fn is_allowed(&self, map_type: bpf_map_type) -> bool {
252 match self {
253 MapTypeFilter::AllowList(types) => types.contains(&map_type),
254 MapTypeFilter::DenyList(types) => !types.contains(&map_type),
255 }
256 }
257}
258
/// The abstract type of a value tracked by the verifier. The first group of
/// variants describes runtime values held in registers or on the stack; the
/// `*Parameter` variants only appear in helper `FunctionSignature`s and are
/// matched against actual argument types in `match_parameter_type`.
#[derive(Clone, Debug, PartialEq)]
pub enum Type {
    /// A numeric value with tracked known/unknown/unwritten bits and range.
    ScalarValue(ScalarValueData),
    /// A pointer to the map object with the given id and schema.
    ConstPtrToMap { id: u64, schema: MapSchema },
    /// A pointer into this program's stack.
    PtrToStack { offset: StackOffset },
    /// A pointer into a memory region of `buffer_size` bytes.
    PtrToMemory { id: MemoryId, offset: ScalarValueData, buffer_size: u64 },
    /// A pointer into a struct whose accessible fields are listed in
    /// `descriptor`.
    PtrToStruct { id: MemoryId, offset: ScalarValueData, descriptor: Arc<StructDescriptor> },
    /// A cursor into an array whose length is only known at runtime; proven
    /// bounds are tracked in `ComputationContext::array_bounds`.
    PtrToArray { id: MemoryId, offset: ScalarValueData },
    /// The one-past-the-end pointer of the array identified by `id`.
    PtrToEndArray { id: MemoryId },
    /// Either null or `inner`; resolved by null checks (see `set_null`).
    NullOr { id: MemoryId, inner: Box<Type> },
    /// A resource that must eventually be released; usable only while its id
    /// is present in `ComputationContext::resources`.
    Releasable { id: MemoryId, inner: Box<Type> },
    /// Parameter: any fully initialized scalar.
    ScalarValueParameter,
    /// Parameter: a map pointer whose map type passes `filter`.
    ConstPtrToMapParameter { filter: MapTypeFilter },
    /// Parameter: a buffer big enough for the key of the map passed as the
    /// parameter at `map_ptr_index`.
    MapKeyParameter {
        map_ptr_index: u8,
    },
    /// Parameter: a buffer big enough for the value of the map passed as the
    /// parameter at `map_ptr_index`.
    MapValueParameter {
        map_ptr_index: u8,
    },
    /// Parameter: a memory buffer of the given `size`. `input` requires the
    /// buffer to be readable before the call; `output` marks it as written
    /// by the helper.
    MemoryParameter {
        size: MemoryParameterSize,
        input: bool,
        output: bool,
    },
    /// Parameter: not matched anywhere in this file; presumably ties this
    /// parameter's type to the parameter at `parameter_index` — TODO confirm
    /// with the call-handling code.
    AliasParameter {
        parameter_index: u8,
    },
    /// Parameter: either the constant 0 or a value matching the inner
    /// parameter type.
    NullOrParameter(Box<Type>),
    /// Parameter: a pointer (at offset 0) to a memory region or struct whose
    /// id matches `id`.
    StructParameter { id: MemoryId },
    /// Parameter: must be exactly the program argument at `parameter_index`.
    ContextParameter { parameter_index: u8 },
    /// Parameter: a live `Releasable` whose id descends from `id`; the inner
    /// types are matched recursively.
    ReleasableParameter { id: MemoryId, inner: Box<Type> },
    /// Parameter: consumes (releases) the `Releasable` whose id descends
    /// from `id`.
    ReleaseParameter { id: MemoryId },
    /// Parameter: accepts anything.
    AnyParameter,
}
329
/// Partial order on types, used by `verify_program` to prune states: a
/// "greater" type constrains the program less, so a state whose types are
/// all greater-or-equal than a terminated state's cannot uncover new
/// behavior. `UNINITIALIZED` is the greatest element, equal types compare
/// `Equal`, scalars compare via their data, and all other pairs are
/// incomparable.
impl PartialOrd for Type {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        if self == other {
            return Some(Ordering::Equal);
        }

        if self == &Type::UNINITIALIZED {
            return Some(Ordering::Greater);
        }
        if other == &Type::UNINITIALIZED {
            return Some(Ordering::Less);
        }

        match (self, other) {
            (Self::ScalarValue(data1), Self::ScalarValue(data2)) => data1.partial_cmp(data2),
            _ => None,
        }
    }
}
365
impl From<ScalarValueData> for Type {
    fn from(value: ScalarValueData) -> Self {
        Self::ScalarValue(value)
    }
}

impl From<u64> for Type {
    /// Builds a fully known constant scalar.
    fn from(value: u64) -> Self {
        Self::ScalarValue(value.into())
    }
}
377
378impl Default for Type {
379 fn default() -> Self {
381 Self::UNINITIALIZED.clone()
382 }
383}
384
385impl Type {
    /// A scalar none of whose bits have been written; reading it is an error.
    pub const UNINITIALIZED: Self = Self::ScalarValue(ScalarValueData::UNINITIALIZED);

    /// A scalar that has been fully written but whose value is unknown.
    pub const UNKNOWN_SCALAR: Self = Self::ScalarValue(ScalarValueData::UNKNOWN_WRITTEN);
391
    /// Bit mask covering the low `width` bits. The 64-bit case is special
    /// because `1 << 64` would overflow the shift.
    fn mask(width: DataWidth) -> u64 {
        if width == DataWidth::U64 { u64::MAX } else { (1 << width.bits()) - 1 }
    }
396
397 fn is_written_scalar(&self) -> bool {
399 match self {
400 Self::ScalarValue(data) if data.is_fully_initialized() => true,
401 _ => false,
402 }
403 }
404
    /// Returns whether this value may be read: scalars must be fully
    /// written; every non-scalar type counts as initialized.
    fn is_initialized(&self) -> bool {
        match self {
            Self::ScalarValue(data) => data.is_fully_initialized(),
            _ => true,
        }
    }
412
    /// Returns whether a value of type `self` can be used where `super_type`
    /// is expected.
    pub fn is_subtype(&self, super_type: &Type) -> bool {
        match (self, super_type) {
            // Anything can stand in for an uninitialized scalar.
            (_, Self::ScalarValue(data)) if data.is_uninitialized() => true,

            // Struct pointers: same region, an offset no greater than the
            // super type's, and a descriptor exposing at least the super
            // descriptor's fields.
            (
                Self::PtrToStruct { id: id1, offset: offset1, descriptor: descriptor1 },
                Self::PtrToStruct { id: id2, offset: offset2, descriptor: descriptor2 },
            ) => id1 == id2 && offset1 <= offset2 && descriptor1.is_subtype(descriptor2),

            // Otherwise only an identical type qualifies.
            (self_type, super_type) if self_type == super_type => true,

            _ => false,
        }
    }
430
    /// Returns whether this value is provably non-zero: scalars rely on
    /// their range, `NullOr` may be null, and every other (pointer) type is
    /// non-null by construction.
    pub fn is_non_zero(&self) -> bool {
        match self {
            Self::ScalarValue(d) => d.min() > 0,
            Self::NullOr { .. } => false,
            _ => true,
        }
    }
439
    /// Unwraps a `Releasable` to its inner type, failing if the resource has
    /// already been released; every other type is returned unchanged.
    fn inner(&self, context: &ComputationContext) -> Result<&Type, String> {
        match self {
            Self::Releasable { id, inner } => {
                if context.resources.contains(id) {
                    Ok(&inner)
                } else {
                    Err("Access to released resource".to_string())
                }
            }
            _ => Ok(self),
        }
    }
452
    /// Refines `type1` and `type2` with the knowledge that the conditional
    /// jump `jump_type` (of width `jump_width`) comparing them was taken,
    /// and returns the narrowed pair. Side effects on `context`: null
    /// resolution (`set_null`), re-registration of unwrapped resources and
    /// array-bound updates.
    fn constraint(
        context: &mut ComputationContext,
        jump_type: JumpType,
        jump_width: JumpWidth,
        type1: Self,
        type2: Self,
    ) -> Result<(Self, Self), String> {
        let result = match (jump_width, jump_type, type1.inner(context)?, type2.inner(context)?) {
            // 64-bit equality of two scalars: both sides now carry the union
            // of the known bits and the intersection of the ranges.
            (JumpWidth::W64, JumpType::Eq, Type::ScalarValue(data1), Type::ScalarValue(data2))
                if data1.is_fully_initialized() && data2.is_fully_initialized() =>
            {
                let umin = std::cmp::max(data1.min(), data2.min());
                let umax = std::cmp::min(data1.max(), data2.max());
                let v = Type::ScalarValue(ScalarValueData::new(
                    data1.value | data2.value,
                    data1.unknown_mask & data2.unknown_mask,
                    0,
                    U64Range::new(umin, umax),
                ));
                (v.clone(), v)
            }
            // 32-bit equality: only the low 32 bits are known equal; ranges
            // can only be intersected when both values fit in 32 bits.
            (JumpWidth::W32, JumpType::Eq, Type::ScalarValue(data1), Type::ScalarValue(data2))
                if data1.is_fully_initialized() && data2.is_fully_initialized() =>
            {
                let maybe_umin = if data1.min() <= U32_MAX && data2.min() <= U32_MAX {
                    Some(std::cmp::max(data1.min(), data2.min()))
                } else {
                    None
                };
                let maybe_umax = if data1.max() <= U32_MAX && data2.max() <= U32_MAX {
                    Some(std::cmp::min(data1.max(), data2.max()))
                } else {
                    None
                };

                let urange1 = U64Range::new(
                    maybe_umin.unwrap_or_else(|| data1.min()),
                    maybe_umax.unwrap_or_else(|| data1.max()),
                );
                let v1 = Type::ScalarValue(ScalarValueData::new(
                    data1.value | (data2.value & U32_MAX),
                    data1.unknown_mask & (data2.unknown_mask | (U32_MAX << 32)),
                    0,
                    urange1,
                ));

                let urange2 = U64Range::new(
                    maybe_umin.unwrap_or_else(|| data2.min()),
                    maybe_umax.unwrap_or_else(|| data2.max()),
                );
                let v2 = Type::ScalarValue(ScalarValueData::new(
                    data2.value | (data1.value & U32_MAX),
                    data2.unknown_mask & (data1.unknown_mask | (U32_MAX << 32)),
                    0,
                    urange2,
                ));
                (v1, v2)
            }
            // Equality with 0: the nullable pointer is null on this branch;
            // both operands collapse to the constant 0.
            (JumpWidth::W64, JumpType::Eq, Type::ScalarValue(data), Type::NullOr { id, .. })
            | (JumpWidth::W64, JumpType::Eq, Type::NullOr { id, .. }, Type::ScalarValue(data))
                if data.is_zero() =>
            {
                context.set_null(id, true);
                let zero = Type::from(0);
                (zero.clone(), zero)
            }
            // A strict comparison (one excluding equality) against 0
            // succeeded: the nullable pointer is non-null. Unwrap it and
            // re-register any resource it carries.
            (JumpWidth::W64, jump_type, Type::NullOr { id, inner }, Type::ScalarValue(data))
                if jump_type.is_strict() && data.is_zero() =>
            {
                context.set_null(id, false);
                let inner = *inner.clone();
                inner.register_resource(context);
                (inner, type2)
            }
            (JumpWidth::W64, jump_type, Type::ScalarValue(data), Type::NullOr { id, inner })
                if jump_type.is_strict() && data.is_zero() =>
            {
                context.set_null(id, false);
                let inner = *inner.clone();
                inner.register_resource(context);
                (type1, inner)
            }

            // Unsigned strict comparisons: tighten both ranges. The
            // debug_asserts encode that this branch is only explored when it
            // is satisfiable.
            (JumpWidth::W64, JumpType::Lt, Type::ScalarValue(lhs), Type::ScalarValue(rhs)) => {
                debug_assert!(lhs.min() < u64::MAX);
                debug_assert!(rhs.max() > 0);
                let new_max_lhs = std::cmp::min(lhs.max(), rhs.max() - 1);
                debug_assert!(lhs.min() <= new_max_lhs);
                let new_min_rhs = std::cmp::max(rhs.min(), lhs.min() + 1);
                debug_assert!(new_min_rhs <= rhs.max());
                let new_range_lhs = U64Range::new(lhs.min(), new_max_lhs);
                let new_range_rhs = U64Range::new(new_min_rhs, rhs.max());
                (lhs.update_range(new_range_lhs).into(), rhs.update_range(new_range_rhs).into())
            }
            (JumpWidth::W64, JumpType::Gt, Type::ScalarValue(lhs), Type::ScalarValue(rhs)) => {
                debug_assert!(rhs.min() < u64::MAX);
                debug_assert!(lhs.max() > 0);
                let new_min_lhs = std::cmp::max(lhs.min(), rhs.min() + 1);
                debug_assert!(new_min_lhs <= lhs.max());
                let new_max_rhs = std::cmp::min(rhs.max(), lhs.max() - 1);
                debug_assert!(rhs.min() <= new_max_rhs);
                let new_range_lhs = U64Range::new(new_min_lhs, lhs.max());
                let new_range_rhs = U64Range::new(rhs.min(), new_max_rhs);
                (lhs.update_range(new_range_lhs).into(), rhs.update_range(new_range_rhs).into())
            }

            // Unsigned non-strict comparisons: same idea, inclusive bounds.
            (JumpWidth::W64, JumpType::Le, Type::ScalarValue(lhs), Type::ScalarValue(rhs)) => {
                let new_max_lhs = std::cmp::min(lhs.max(), rhs.max());
                debug_assert!(lhs.min() <= new_max_lhs);
                let new_min_rhs = std::cmp::max(rhs.min(), lhs.min());
                debug_assert!(new_min_rhs <= rhs.max());
                let new_range_lhs = U64Range::new(lhs.min(), new_max_lhs);
                let new_range_rhs = U64Range::new(new_min_rhs, rhs.max());
                (lhs.update_range(new_range_lhs).into(), rhs.update_range(new_range_rhs).into())
            }
            (JumpWidth::W64, JumpType::Ge, Type::ScalarValue(lhs), Type::ScalarValue(rhs)) => {
                let new_min_lhs = std::cmp::max(lhs.min(), rhs.min());
                debug_assert!(new_min_lhs <= lhs.max());
                let new_max_rhs = std::cmp::min(rhs.max(), lhs.max());
                debug_assert!(rhs.min() <= new_max_rhs);
                let new_range_lhs = U64Range::new(new_min_lhs, lhs.max());
                let new_range_rhs = U64Range::new(rhs.min(), new_max_rhs);
                (lhs.update_range(new_range_lhs).into(), rhs.update_range(new_range_rhs).into())
            }

            // Comparing an array cursor with the matching end pointer proves
            // accesses up to `offset` are in bounds.
            (
                JumpWidth::W64,
                JumpType::Eq,
                Type::PtrToArray { id: id1, offset },
                Type::PtrToEndArray { id: id2 },
            )
            | (
                JumpWidth::W64,
                JumpType::Le,
                Type::PtrToArray { id: id1, offset },
                Type::PtrToEndArray { id: id2 },
            )
            | (
                JumpWidth::W64,
                JumpType::Ge,
                Type::PtrToEndArray { id: id1 },
                Type::PtrToArray { id: id2, offset },
            ) if id1 == id2 => {
                context.update_array_bounds(id1.clone(), *offset);
                (type1, type2)
            }
            // Strict variants prove one element more.
            (
                JumpWidth::W64,
                JumpType::Lt,
                Type::PtrToArray { id: id1, offset },
                Type::PtrToEndArray { id: id2 },
            )
            | (
                JumpWidth::W64,
                JumpType::Gt,
                Type::PtrToEndArray { id: id1 },
                Type::PtrToArray { id: id2, offset },
            ) if id1 == id2 => {
                context.update_array_bounds(id1.clone(), *offset + 1);
                (type1, type2)
            }
            // Any other successful 64-bit equality makes the operands
            // interchangeable: propagate one representation to both.
            (JumpWidth::W64, JumpType::Eq, _, _) => (type1.clone(), type1),
            _ => (type1, type2),
        };
        Ok(result)
    }
621
    /// Checks that `self` — the actual type of the argument at `index` — is
    /// acceptable where the helper `helper_name` expects `parameter_type`,
    /// and applies the call's effects (stack writes for output buffers,
    /// resource release) to `next`, the post-call state.
    fn match_parameter_type(
        &self,
        verification_context: &VerificationContext<'_>,
        context: &ComputationContext,
        helper_name: &str,
        parameter_type: &Type,
        index: usize,
        next: &mut ComputationContext,
    ) -> Result<(), String> {
        match (parameter_type, self) {
            // A nullable parameter accepts the constant 0...
            (Type::NullOrParameter(t), Type::ScalarValue(data))
                if data.is_known() && data.value == 0 =>
            {
                Ok(())
            }
            // ...or anything matching the inner parameter type.
            (Type::NullOrParameter(t), _) => self.match_parameter_type(
                verification_context,
                context,
                helper_name,
                t,
                index,
                next,
            ),
            (Type::ScalarValueParameter, Type::ScalarValue(data))
                if data.is_fully_initialized() =>
            {
                Ok(())
            }
            (Type::ConstPtrToMapParameter { filter }, Type::ConstPtrToMap { schema, .. }) => {
                let map_type = schema.map_type;
                if !filter.is_allowed(map_type) {
                    return Err(format!("Map type {map_type} not allowed in {helper_name}"));
                }
                Ok(())
            }
            // Map keys/values can live in arbitrary memory or on the stack;
            // either way the buffer must cover the key/value size of the map
            // passed at `map_ptr_index`.
            (
                Type::MapKeyParameter { map_ptr_index },
                Type::PtrToMemory { offset, buffer_size, .. },
            ) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.check_memory_access(*offset, *buffer_size, 0, schema.key_size as usize)
            }
            (Type::MapKeyParameter { map_ptr_index }, Type::PtrToStack { offset }) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.stack.read_data_ptr(context.pc, *offset, schema.key_size as u64)
            }
            (
                Type::MapValueParameter { map_ptr_index },
                Type::PtrToMemory { offset, buffer_size, .. },
            ) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.check_memory_access(*offset, *buffer_size, 0, schema.value_size as usize)
            }
            (Type::MapValueParameter { map_ptr_index }, Type::PtrToStack { offset }) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.stack.read_data_ptr(context.pc, *offset, schema.value_size as u64)
            }
            // A memory parameter backed by a memory region: the requested
            // size must fit after the (worst-case) offset.
            (Type::MemoryParameter { size, .. }, Type::PtrToMemory { offset, buffer_size, .. }) => {
                let expected_size = size.size(context)?;
                let offset_max = offset.max();
                if offset_max > *buffer_size {
                    return Err("out of bound read".to_string());
                }
                let size_left = *buffer_size - offset_max;
                if expected_size > size_left {
                    return Err("out of bound read".to_string());
                }
                Ok(())
            }

            // A memory parameter backed by the stack: output buffers become
            // written-but-unknown in the post-call state, input buffers must
            // be readable before the call.
            (Type::MemoryParameter { size, input, output }, Type::PtrToStack { offset }) => {
                let size = size.size(context)?;
                let buffer_end = offset.add(size);
                if !buffer_end.is_valid() {
                    Err("out of bound access".to_string())
                } else {
                    if *output {
                        next.stack.write_data_ptr(context.pc, *offset, size)?;
                    }
                    if *input {
                        context.stack.read_data_ptr(context.pc, *offset, size)?;
                    }
                    Ok(())
                }
            }
            // A struct parameter accepts a pointer to the start (offset 0)
            // of a memory region or struct with a compatible id.
            (
                Type::StructParameter { id: id1 },
                Type::PtrToMemory { id: id2, offset, .. }
                | Type::PtrToStruct { id: id2, offset, .. },
            ) if offset.is_zero() && id1.matches(id2) => Ok(()),
            // A releasable parameter: the resource must still be live, and
            // the inner types must match recursively.
            (
                Type::ReleasableParameter { id: id1, inner: inner1 },
                Type::Releasable { id: id2, inner: inner2 },
            ) if id2.has_parent(id1) => {
                if next.resources.contains(id2) {
                    inner2.match_parameter_type(
                        verification_context,
                        context,
                        helper_name,
                        inner1,
                        index,
                        next,
                    )
                } else {
                    Err(format!("Resource already released for index {index}"))
                }
            }
            // The argument must be exactly the program argument at
            // `parameter_index`.
            (Type::ContextParameter { parameter_index }, arg_type) => {
                if verification_context.calling_context.args.get(*parameter_index as usize)
                    == Some(arg_type)
                {
                    Ok(())
                } else {
                    Err(format!("Helper expects program argument {parameter_index}"))
                }
            }
            // Releasing: consume the resource from the post-call state; it
            // must not have been released already.
            (Type::ReleaseParameter { id: id1 }, Type::Releasable { id: id2, .. })
                if id2.has_parent(id1) =>
            {
                if next.resources.remove(id2) {
                    Ok(())
                } else {
                    Err(format!("{id2:?} Resource already released for index {index}"))
                }
            }
            // For every other parameter kind, a releasable wrapper is
            // transparent: match against its inner type.
            (_, Type::Releasable { inner, .. }) => inner.match_parameter_type(
                verification_context,
                context,
                helper_name,
                parameter_type,
                index,
                next,
            ),
            (Type::AnyParameter, _) => Ok(()),

            _ => Err(format!("incorrect parameter for index {index}")),
        }
    }
760
    /// Applies the outcome of a null check for the pointer identified by
    /// `null_id`: a `NullOr` with that id collapses to the constant 0 when
    /// proven null, or to its inner type when proven non-null. Every other
    /// type is left untouched.
    fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
        match self {
            Type::NullOr { id, inner } if id == null_id => {
                if is_null {
                    *self = Type::from(0);
                } else {
                    *self = *inner.clone();
                }
            }
            _ => {}
        }
    }
775
776 fn register_resource(&self, context: &mut ComputationContext) {
777 match self {
778 Type::Releasable { id, .. } => {
779 context.resources.insert(id.clone());
780 }
781 _ => {}
782 }
783 }
784
    /// Compares two sequences of types element-wise, combining the
    /// per-element orderings with `associate_orderings`. Returns `None` if
    /// the lengths differ, any pair is incomparable, or the orderings
    /// conflict.
    fn compare_list<'a>(
        mut l1: impl Iterator<Item = &'a Self>,
        mut l2: impl Iterator<Item = &'a Self>,
    ) -> Option<Ordering> {
        let mut result = Ordering::Equal;
        loop {
            match (l1.next(), l2.next()) {
                (None, None) => return Some(result),
                (_, None) | (None, _) => return None,
                (Some(v1), Some(v2)) => {
                    result = associate_orderings(result, v1.partial_cmp(v2)?)?;
                }
            }
        }
    }
807}
808
/// Type signature of a helper function.
#[derive(Clone, Debug)]
pub struct FunctionSignature {
    /// Expected argument types, matched against the registers at the call
    /// site (arguments start at r1).
    pub args: Vec<Type>,
    /// Type of the value the helper leaves in the return register.
    pub return_value: Type,
    /// Whether calling the helper invalidates previously proven array
    /// bounds — presumably because it can move/resize the underlying
    /// buffers; the consumer of this flag is outside this view.
    pub invalidate_array_bounds: bool,
}
815
/// A helper function callable from eBPF programs.
#[derive(Clone, Debug)]
pub struct HelperDefinition {
    /// Call index by which programs reference the helper.
    pub index: u32,
    /// Human-readable name, used in error messages.
    pub name: &'static str,
    /// The helper's type signature.
    pub signature: FunctionSignature,
}
822
/// Describes the environment a program will be verified against.
#[derive(Debug, Default)]
pub struct CallingContext {
    /// Schemas of the maps used by the program, addressed by index.
    pub maps: Vec<MapSchema>,
    /// Available helpers, keyed by call index.
    pub helpers: HashMap<u32, &'static HelperDefinition>,
    /// Types of the program's arguments (placed in r1.. at entry).
    pub args: Vec<Type>,
    /// Type of the packet pointer, when the program type has one — not used
    /// in the code visible here; confirm semantics with the visitor.
    pub packet_type: Option<Type>,
}
835
impl CallingContext {
    /// Registers `schema` and returns the index by which instructions can
    /// reference the map.
    pub fn register_map(&mut self, schema: MapSchema) -> usize {
        let index = self.maps.len();
        self.maps.push(schema);
        index
    }
}
843
/// Records one instruction that accesses a struct field, so the consumer of
/// the verified program can post-process (e.g. relocate) the access; the
/// consumer is outside this file.
#[derive(Debug, PartialEq, Clone)]
pub struct StructAccess {
    /// Instruction performing the access.
    pub pc: ProgramCounter,

    /// Id of the accessed struct's memory region.
    pub memory_id: MemoryId,

    /// Byte offset of the accessed field within the struct.
    pub field_offset: usize,

    /// Whether the instruction loads a 32-bit pointer field.
    pub is_32_bit_ptr_load: bool,
}
857
/// A program that passed verification, together with the metadata collected
/// while verifying it.
#[derive(Debug, Clone)]
pub struct VerifiedEbpfProgram {
    pub(crate) code: Vec<EbpfInstruction>,
    pub(crate) args: Vec<Type>,
    pub(crate) struct_access_instructions: Vec<StructAccess>,
    pub(crate) maps: Vec<MapSchema>,
}
865
impl VerifiedEbpfProgram {
    /// Consumes the program and returns the raw code. Only valid once the
    /// program no longer carries struct accesses or maps to resolve, which
    /// the debug assertions check.
    pub fn to_code(self) -> Vec<EbpfInstruction> {
        debug_assert!(self.struct_access_instructions.is_empty());
        debug_assert!(self.maps.is_empty());
        self.code
    }

    pub fn code(&self) -> &[EbpfInstruction] {
        &self.code
    }

    pub fn struct_access_instructions(&self) -> &[StructAccess] {
        &self.struct_access_instructions
    }

    /// Reassembles a `VerifiedEbpfProgram` from parts that were verified
    /// previously; no verification is performed here.
    pub fn from_verified_code(
        code: Vec<EbpfInstruction>,
        args: Vec<Type>,
        struct_access_instructions: Vec<StructAccess>,
        maps: Vec<MapSchema>,
    ) -> Self {
        Self { code, args, struct_access_instructions, maps }
    }

    pub fn maps(&self) -> &[MapSchema] {
        &self.maps
    }
}
896
/// Verifies `code` against `calling_context`, returning the verified
/// program, or logging and returning an `EbpfError` for the first problem
/// found.
///
/// The verifier symbolically executes the program with a work list of
/// `ComputationContext` states, one per pending execution path.
pub fn verify_program(
    code: Vec<EbpfInstruction>,
    calling_context: CallingContext,
    logger: &mut dyn VerifierLogger,
) -> Result<VerifiedEbpfProgram, EbpfError> {
    if code.len() > BPF_MAX_INSTS {
        return error_and_log(logger, "ebpf program too long");
    }

    // Initial state: the program arguments are placed in r1..rn.
    let mut context = ComputationContext::default();
    for (i, t) in calling_context.args.iter().enumerate() {
        context.set_reg((i + 1) as u8, t.clone()).map_err(EbpfError::ProgramVerifyError)?;
    }
    let states = vec![context];
    let mut verification_context = VerificationContext {
        calling_context,
        logger,
        states,
        code: &code,
        counter: 0,
        iteration: 0,
        terminating_contexts: Default::default(),
        struct_access_instructions: Default::default(),
    };
    while let Some(mut context) = verification_context.states.pop() {
        if let Some(terminating_contexts) =
            verification_context.terminating_contexts.get(&context.pc)
        {
            // If a state at least as general as this one already terminated
            // at this pc, exploring further cannot uncover new behavior:
            // record the dependency on the terminated state and drop this
            // one.
            if let Some(ending_context) =
                terminating_contexts.iter().find(|c| c.computation_context >= context)
            {
                if let Some(parent) = context.parent.take() {
                    parent.dependencies.lock().push(ending_context.dependencies.clone());
                    // If this was the last outstanding child, the parent
                    // itself terminates now.
                    if let Some(parent) = Arc::into_inner(parent) {
                        parent
                            .terminate(&mut verification_context)
                            .map_err(EbpfError::ProgramVerifyError)?;
                    }
                }
                continue;
            }
        }
        // Global fuel limit guaranteeing the verification itself terminates.
        if verification_context.iteration > 10 * BPF_MAX_INSTS {
            return error_and_log(verification_context.logger, "bpf byte code does not terminate");
        }
        if context.pc >= code.len() {
            return error_and_log(verification_context.logger, "pc out of bounds");
        }
        let visit_result = context.visit(&mut verification_context, code[context.pc]);
        match visit_result {
            Err(message) => {
                let message = format!("at PC {}: {}", context.pc, message);
                return error_and_log(verification_context.logger, message);
            }
            _ => {}
        }
        verification_context.iteration += 1;
    }

    let struct_access_instructions =
        verification_context.struct_access_instructions.into_values().collect::<Vec<_>>();
    let CallingContext { maps, args, .. } = verification_context.calling_context;
    Ok(VerifiedEbpfProgram { code, struct_access_instructions, maps, args })
}
969
/// Mutable state shared across the verification of one program.
struct VerificationContext<'a> {
    /// The environment the program is verified against.
    calling_context: CallingContext,
    /// Sink for diagnostics.
    logger: &'a mut dyn VerifierLogger,
    /// Work list of execution states still to be explored.
    states: Vec<ComputationContext>,
    /// The program being verified.
    code: &'a [EbpfInstruction],
    /// Source of ids unique to this run (see `next_id`).
    counter: u64,
    /// Number of instructions visited so far; bounded in `verify_program`
    /// to force termination.
    iteration: usize,
    /// Terminated states, grouped by pc; used to prune equivalent in-flight
    /// states.
    terminating_contexts: BTreeMap<ProgramCounter, Vec<TerminatingContext>>,
    /// Struct field accesses discovered so far, keyed by pc; every path
    /// through a pc must describe the same access.
    struct_access_instructions: HashMap<ProgramCounter, StructAccess>,
}
993
impl<'a> VerificationContext<'a> {
    /// Returns an id unique within this verification run.
    fn next_id(&mut self) -> u64 {
        let id = self.counter;
        self.counter += 1;
        id
    }

    /// Records that the instruction at `struct_access.pc` performs the given
    /// struct field access. Fails if another path already registered a
    /// different access for the same pc.
    fn register_struct_access(&mut self, struct_access: StructAccess) -> Result<(), String> {
        match self.struct_access_instructions.entry(struct_access.pc) {
            std::collections::hash_map::Entry::Vacant(entry) => {
                entry.insert(struct_access);
            }
            std::collections::hash_map::Entry::Occupied(entry) => {
                if *entry.get() != struct_access {
                    return Err("Inconsistent struct field access".to_string());
                }
            }
        }
        Ok(())
    }
}
1017
/// Size in bytes of one stack slot; the stack is modeled as u64 slots.
const STACK_ELEMENT_SIZE: usize = std::mem::size_of::<u64>();
/// Number of u64 slots in the BPF stack.
const STACK_MAX_INDEX: usize = BPF_STACK_SIZE / STACK_ELEMENT_SIZE;
1020
/// A byte offset into the BPF stack, backed by a `ScalarValueData` so that
/// arithmetic on possibly-unknown values can be tracked.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct StackOffset(ScalarValueData);

impl Default for StackOffset {
    /// The initial stack pointer: the end of the stack (`BPF_STACK_SIZE`),
    /// consistent with eBPF's frame-pointer convention of a downward-growing
    /// stack.
    fn default() -> Self {
        Self(BPF_STACK_SIZE.into())
    }
}
1031
impl StackOffset {
    /// A valid offset is statically known and within the stack; the end
    /// itself is included so one-past-the-end ranges can be expressed.
    fn is_valid(&self) -> bool {
        self.0.is_known() && self.0.value <= (BPF_STACK_SIZE as u64)
    }

    /// The raw scalar backing this offset.
    fn reg(&self) -> ScalarValueData {
        self.0
    }

    /// Index of the 8-byte slot this offset falls in. Requires `is_valid()`.
    fn array_index(&self) -> usize {
        debug_assert!(self.is_valid());
        usize::try_from(self.0.value).unwrap() / STACK_ELEMENT_SIZE
    }

    /// Byte position inside the slot. Requires `is_valid()`.
    fn sub_index(&self) -> usize {
        debug_assert!(self.is_valid());
        usize::try_from(self.0.value).unwrap() % STACK_ELEMENT_SIZE
    }

    /// The offset advanced by `rhs` bytes; callers re-check `is_valid`.
    fn add<T: Into<ScalarValueData>>(self, rhs: T) -> Self {
        Self(self.0 + rhs)
    }
}
1060
/// The program's stack, modeled as a sparse map from 8-byte slot index to
/// the type stored there; missing slots read as `UNINITIALIZED`.
#[derive(Clone, Debug, Default, PartialEq)]
struct Stack {
    data: HashMap<usize, Type>,
}
1066
1067impl Stack {
1068 fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
1071 for (_, t) in self.data.iter_mut() {
1072 t.set_null(null_id, is_null);
1073 }
1074 }
1075
    /// Returns the type in slot `index`; absent slots read as
    /// `UNINITIALIZED`.
    fn get(&self, index: usize) -> &Type {
        self.data.get(&index).unwrap_or(&Type::UNINITIALIZED)
    }

    /// Stores `t` into slot `index`, keeping the map sparse by removing
    /// entries equal to the `UNINITIALIZED` default.
    fn set(&mut self, index: usize, t: Type) {
        if t == Type::UNINITIALIZED {
            self.data.remove(&index);
        } else {
            self.data.insert(index, t);
        }
    }
1087
    /// Reads `byte_count` bytes of `value` starting at byte `offset`
    /// (native-endian byte order) and returns them as an integer.
    fn extract_sub_value(value: u64, offset: usize, byte_count: usize) -> u64 {
        NativeEndian::read_uint(&value.as_bytes()[offset..], byte_count)
    }

    /// Overwrites `width.bytes()` bytes of `original` at byte `offset` with
    /// the low bytes of `value`, returning the result.
    fn insert_sub_value(mut original: u64, value: u64, width: DataWidth, offset: usize) -> u64 {
        let byte_count = width.bytes();
        let original_buf = original.as_mut_bytes();
        let value_buf = value.as_bytes();
        original_buf[offset..(byte_count + offset)].copy_from_slice(&value_buf[..byte_count]);
        original
    }
1099
1100 fn write_data_ptr(
1101 &mut self,
1102 pc: ProgramCounter,
1103 mut offset: StackOffset,
1104 bytes: u64,
1105 ) -> Result<(), String> {
1106 for i in 0..bytes {
1107 self.store(offset, Type::UNKNOWN_SCALAR, DataWidth::U8)?;
1108 offset = offset.add(1);
1109 }
1110 Ok(())
1111 }
1112
    /// Checks that `bytes` bytes starting at `offset` can be read from the
    /// stack: every byte must belong to a scalar and have been written.
    ///
    /// `pc` is currently unused by the visible code.
    fn read_data_ptr(
        &self,
        pc: ProgramCounter,
        offset: StackOffset,
        bytes: u64,
    ) -> Result<(), String> {
        // Validates the byte range [start_offset, end_offset) of one slot.
        let read_element =
            |index: usize, start_offset: usize, end_offset: usize| -> Result<(), String> {
                match self.get(index) {
                    Type::ScalarValue(data) => {
                        debug_assert!(end_offset > start_offset);
                        let unwritten_bits = Self::extract_sub_value(
                            data.unwritten_mask,
                            start_offset,
                            end_offset - start_offset,
                        );
                        if unwritten_bits == 0 {
                            Ok(())
                        } else {
                            Err("reading unwritten value from the stack".to_string())
                        }
                    }
                    _ => Err("invalid read from the stack".to_string()),
                }
            };
        if bytes == 0 {
            return Ok(());
        }

        if bytes as usize > BPF_STACK_SIZE {
            return Err("stack overflow".to_string());
        }

        if !offset.is_valid() {
            return Err("invalid stack offset".to_string());
        }

        let end_offset = offset.add(bytes);
        if !end_offset.is_valid() {
            return Err("stack overflow".to_string())?;
        }

        // Fast path: the whole range lives in a single slot.
        if offset.array_index() == end_offset.array_index() {
            return read_element(offset.array_index(), offset.sub_index(), end_offset.sub_index());
        }

        // First (possibly partial) slot.
        read_element(offset.array_index(), offset.sub_index(), STACK_ELEMENT_SIZE)?;

        // Last slot, when the range does not end exactly on a slot boundary.
        if end_offset.sub_index() != 0 {
            read_element(end_offset.array_index(), 0, end_offset.sub_index())?;
        }

        // Every fully covered slot in between.
        for i in (offset.array_index() + 1)..end_offset.array_index() {
            read_element(i, 0, STACK_ELEMENT_SIZE)?;
        }

        Ok(())
    }
1177
    /// Stores `value` of the given `width` at `offset`. Full-slot (u64)
    /// stores may hold any type; partial stores are only allowed for
    /// scalars, whose value/mask bytes are spliced into the slot's existing
    /// contents. Fails on out-of-bounds or misaligned accesses.
    fn store(&mut self, offset: StackOffset, value: Type, width: DataWidth) -> Result<(), String> {
        if !offset.is_valid() {
            return Err("out of bounds store".to_string());
        }
        if offset.sub_index() % width.bytes() != 0 {
            return Err("misaligned access".to_string());
        }

        let index = offset.array_index();
        if width == DataWidth::U64 {
            self.set(index, value);
        } else {
            match value {
                Type::ScalarValue(data) => {
                    let old_data = match self.get(index) {
                        Type::ScalarValue(data) => *data,
                        _ => {
                            // Partially overwriting a non-scalar destroys
                            // it; treat the untouched bytes as uninitialized.
                            ScalarValueData::UNINITIALIZED
                        }
                    };
                    let sub_index = offset.sub_index();
                    let value =
                        Self::insert_sub_value(old_data.value, data.value, width, sub_index);
                    let unknown_mask = Self::insert_sub_value(
                        old_data.unknown_mask,
                        data.unknown_mask,
                        width,
                        sub_index,
                    );
                    let unwritten_mask = Self::insert_sub_value(
                        old_data.unwritten_mask,
                        data.unwritten_mask,
                        width,
                        sub_index,
                    );
                    let urange = U64Range::compute_range_for_bytes_swap(
                        old_data.urange,
                        data.urange,
                        sub_index,
                        0,
                        width.bytes(),
                    );
                    self.set(
                        index,
                        Type::ScalarValue(ScalarValueData::new(
                            value,
                            unknown_mask,
                            unwritten_mask,
                            urange,
                        )),
                    );
                }
                _ => {
                    return Err("cannot store part of a non scalar value on the stack".to_string());
                }
            }
        }
        Ok(())
    }
1239
    /// Loads a value of `width` bytes from `offset`. Full-slot loads return
    /// the stored type as-is; partial loads are only valid on scalars and
    /// extract the addressed bytes. Fails on out-of-bounds or misaligned
    /// accesses, and on reads of not-fully-written scalars.
    fn load(&self, offset: StackOffset, width: DataWidth) -> Result<Type, String> {
        if !offset.is_valid() {
            return Err("out of bounds load".to_string());
        }
        if offset.array_index() >= STACK_MAX_INDEX {
            return Err("out of bounds load".to_string());
        }
        if offset.sub_index() % width.bytes() != 0 {
            return Err("misaligned access".to_string());
        }

        let index = offset.array_index();
        let loaded_type = self.get(index).clone();
        let result = if width == DataWidth::U64 {
            loaded_type
        } else {
            match loaded_type {
                Type::ScalarValue(data) => {
                    let sub_index = offset.sub_index();
                    let value = Self::extract_sub_value(data.value, sub_index, width.bytes());
                    let unknown_mask =
                        Self::extract_sub_value(data.unknown_mask, sub_index, width.bytes());
                    let unwritten_mask =
                        Self::extract_sub_value(data.unwritten_mask, sub_index, width.bytes());
                    let urange = U64Range::compute_range_for_bytes_swap(
                        0.into(),
                        data.urange,
                        0,
                        sub_index,
                        width.bytes(),
                    );
                    Type::ScalarValue(ScalarValueData::new(
                        value,
                        unknown_mask,
                        unwritten_mask,
                        urange,
                    ))
                }
                _ => return Err(format!("incorrect load of {} bytes", width.bytes())),
            }
        };
        if !result.is_initialized() {
            return Err("reading unwritten value from the stack".to_string());
        }
        Ok(result)
    }
1286}
1287
/// Partial order between stacks, combined slot by slot.
impl PartialOrd for Stack {
    /// Merge-walks both sparse slot maps in key order. A key present on only
    /// one side is still compared — `get` supplies the other side's default
    /// slot value. Per-slot orderings are folded with `associate_orderings`,
    /// which yields `None` as soon as two slots are incomparable.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        let mut result = Ordering::Equal;
        let mut data_iter1 = self.data.iter().peekable();
        let mut data_iter2 = other.data.iter().peekable();
        loop {
            let k1 = data_iter1.peek().map(|(k, _)| *k);
            let k2 = data_iter2.peek().map(|(k, _)| *k);
            // Advance whichever iterator(s) hold the smallest key; that key is
            // the next slot to compare.
            let k = match (k1, k2) {
                (None, None) => return Some(result),
                (Some(k), None) => {
                    data_iter1.next();
                    *k
                }
                (None, Some(k)) => {
                    data_iter2.next();
                    *k
                }
                (Some(k1), Some(k2)) => {
                    if k1 <= k2 {
                        data_iter1.next();
                    }
                    if k2 <= k1 {
                        data_iter2.next();
                    }
                    *std::cmp::min(k1, k2)
                }
            };
            result = associate_orderings(result, self.get(k).partial_cmp(other.get(k))?)?;
        }
    }
}
1322
/// Emits one verifier trace line through the verification context's logger,
/// prefixed with the current program counter and the opcode byte of the
/// instruction being verified.
macro_rules! bpf_log {
    ($context:ident, $verification_context:ident, $($msg:tt)*) => {
        let prefix = format!("{}: ({:02x})", $context.pc, $verification_context.code[$context.pc].code());
        let suffix = format!($($msg)*);
        $verification_context.logger.log(format!("{prefix} {suffix}").as_bytes());
    }
}
1330
/// The verification state associated with one program point on one execution
/// path.
#[derive(Debug, Default)]
struct ComputationContext {
    /// Program counter of the instruction this state is about to execute.
    pc: ProgramCounter,
    /// Known type/value of each general register.
    registers: [Type; GENERAL_REGISTER_COUNT as usize],
    /// Known content of the stack.
    stack: Stack,
    /// Largest proven usable size, keyed by memory id, for array-like
    /// regions (used as the buffer size when checking array accesses).
    array_bounds: BTreeMap<MemoryId, u64>,
    /// Ids of releasable values acquired on this path and not yet released.
    resources: HashSet<MemoryId>,
    /// The state this one was derived from, if any.
    parent: Option<Arc<ComputationContext>>,
    /// Data dependencies reported back by states derived from this one;
    /// merged in `terminate`.
    dependencies: Mutex<Vec<DataDependencies>>,
}
1350
/// `Clone` is implemented manually: every field is copied except
/// `dependencies`, which is deliberately reset so each clone starts
/// accumulating its own dependency reports.
impl Clone for ComputationContext {
    fn clone(&self) -> Self {
        Self {
            pc: self.pc,
            registers: self.registers.clone(),
            stack: self.stack.clone(),
            array_bounds: self.array_bounds.clone(),
            resources: self.resources.clone(),
            parent: self.parent.clone(),
            // Intentionally not carried over from `self`.
            dependencies: Default::default(),
        }
    }
}
1366
1367impl PartialEq for ComputationContext {
1369 fn eq(&self, other: &Self) -> bool {
1370 self.pc == other.pc
1371 && self.registers == other.registers
1372 && self.stack == other.stack
1373 && self.array_bounds == other.array_bounds
1374 }
1375}
1376
1377impl ComputationContext {
1378 fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
1381 for i in 0..self.registers.len() {
1382 self.registers[i].set_null(null_id, is_null);
1383 }
1384 self.stack.set_null(null_id, is_null);
1385 }
1386
1387 fn reg(&self, index: Register) -> Result<Type, String> {
1388 if index >= REGISTER_COUNT {
1389 return Err(format!("R{index} is invalid"));
1390 }
1391 if index < GENERAL_REGISTER_COUNT {
1392 Ok(self.registers[index as usize].clone())
1393 } else {
1394 Ok(Type::PtrToStack { offset: StackOffset::default() })
1395 }
1396 }
1397
1398 fn set_reg(&mut self, index: Register, reg_type: Type) -> Result<(), String> {
1399 if index >= GENERAL_REGISTER_COUNT {
1400 return Err(format!("R{index} is invalid"));
1401 }
1402 self.registers[index as usize] = reg_type;
1403 Ok(())
1404 }
1405
1406 fn update_array_bounds(&mut self, id: MemoryId, new_bound: ScalarValueData) {
1407 let new_bound_min = new_bound.min();
1408 self.array_bounds
1409 .entry(id)
1410 .and_modify(|v| *v = std::cmp::max(*v, new_bound_min))
1411 .or_insert(new_bound_min);
1412 }
1413
1414 fn get_map_schema(&self, argument: u8) -> Result<MapSchema, String> {
1415 match self.reg(argument + 1)? {
1416 Type::ConstPtrToMap { schema, .. } => Ok(schema),
1417 _ => Err(format!("No map found at argument {argument}")),
1418 }
1419 }
1420
1421 fn next(&self) -> Result<Self, String> {
1422 let parent = Some(Arc::new(self.clone()));
1423 self.jump_with_offset(0, parent)
1424 }
1425
1426 fn jump_with_offset(&self, offset: i16, parent: Option<Arc<Self>>) -> Result<Self, String> {
1429 let pc = self
1430 .pc
1431 .checked_add_signed(offset.into())
1432 .and_then(|v| v.checked_add_signed(1))
1433 .ok_or_else(|| "jump outside of program".to_string())?;
1434 let result = Self {
1435 pc,
1436 registers: self.registers.clone(),
1437 stack: self.stack.clone(),
1438 array_bounds: self.array_bounds.clone(),
1439 resources: self.resources.clone(),
1440 parent,
1441 dependencies: Default::default(),
1442 };
1443 Ok(result)
1444 }
1445
1446 fn check_memory_access(
1447 &self,
1448 dst_offset: ScalarValueData,
1449 dst_buffer_size: u64,
1450 instruction_offset: i16,
1451 width: usize,
1452 ) -> Result<(), String> {
1453 let memory_range = dst_offset.urange + instruction_offset + U64Range::new(0, width as u64);
1454 if memory_range.max > dst_buffer_size {
1455 return Err("out of bound access".to_string());
1456 }
1457 Ok(())
1458 }
1459
    /// Verifies a store of `value` into `field` through the pointer `addr`.
    ///
    /// Stores through a stack pointer delegate to `Stack::store` (which
    /// accepts any value type); stores to every other memory kind require the
    /// stored value to be a fully initialized scalar.
    fn store_memory(
        &mut self,
        context: &mut VerificationContext<'_>,
        addr: &Type,
        field: Field,
        value: Type,
    ) -> Result<(), String> {
        let addr = addr.inner(self)?;
        match *addr {
            Type::PtrToStack { offset } => {
                let offset_sum = offset.add(field.offset);
                // The stack keeps full type information, so no scalar check.
                return self.stack.store(offset_sum, value, field.width);
            }
            Type::PtrToMemory { offset, buffer_size, .. } => {
                self.check_memory_access(offset, buffer_size, field.offset, field.width.bytes())?;
            }
            Type::PtrToStruct { ref id, offset, ref descriptor, .. } => {
                let field_desc = descriptor
                    .find_field(offset, field)
                    .ok_or_else(|| "incorrect store".to_string())?;

                // Only fields declared mutable may be written.
                if !matches!(field_desc.field_type, FieldType::MutableScalar { .. }) {
                    return Err("store to a read-only field".to_string());
                }

                // Record the access so the runtime can relocate/validate it.
                context.register_struct_access(StructAccess {
                    pc: self.pc,
                    memory_id: id.clone(),
                    field_offset: field_desc.offset,
                    is_32_bit_ptr_load: false,
                })?;
            }
            Type::PtrToArray { ref id, offset } => {
                // The usable size is whatever bound has been proven so far
                // (0 when nothing is known, which rejects the access).
                self.check_memory_access(
                    offset,
                    *self.array_bounds.get(&id).unwrap_or(&0),
                    field.offset,
                    field.width.bytes(),
                )?;
            }
            _ => return Err("incorrect store".to_string()),
        }

        // Non-stack memory may be observed elsewhere: only fully initialized
        // scalars may be written to it.
        match value {
            Type::ScalarValue(data) if data.is_fully_initialized() => {}
            _ => return Err("incorrect store".to_string()),
        }
        Ok(())
    }
1510
    /// Verifies a load of `field` through the pointer `addr` and returns the
    /// type of the loaded value.
    fn load_memory(
        &self,
        context: &mut VerificationContext<'_>,
        addr: &Type,
        field: Field,
    ) -> Result<Type, String> {
        let addr = addr.inner(self)?;
        match *addr {
            Type::PtrToStack { offset } => {
                let offset_sum = offset.add(field.offset);
                self.stack.load(offset_sum, field.width)
            }
            Type::PtrToMemory { ref id, offset, buffer_size, .. } => {
                self.check_memory_access(offset, buffer_size, field.offset, field.width.bytes())?;
                // Plain memory has no tracked content: the result is an
                // unknown, but initialized, scalar.
                Ok(Type::UNKNOWN_SCALAR)
            }
            Type::PtrToStruct { ref id, offset, ref descriptor, .. } => {
                let field_desc = descriptor
                    .find_field(offset, field)
                    .ok_or_else(|| "incorrect load".to_string())?;

                // The loaded type is dictated by the field descriptor;
                // pointer-valued fields get a memory id scoped under the
                // struct's own id via `prepended`.
                let (return_type, is_32_bit_ptr_load) = match &field_desc.field_type {
                    FieldType::Scalar { .. } | FieldType::MutableScalar { .. } => {
                        (Type::UNKNOWN_SCALAR, false)
                    }
                    FieldType::PtrToArray { id: array_id, is_32_bit } => (
                        Type::PtrToArray { id: array_id.prepended(id.clone()), offset: 0.into() },
                        *is_32_bit,
                    ),
                    FieldType::PtrToEndArray { id: array_id, is_32_bit } => {
                        (Type::PtrToEndArray { id: array_id.prepended(id.clone()) }, *is_32_bit)
                    }
                    FieldType::PtrToMemory { id: memory_id, buffer_size, is_32_bit } => (
                        Type::PtrToMemory {
                            id: memory_id.prepended(id.clone()),
                            offset: 0.into(),
                            buffer_size: *buffer_size as u64,
                        },
                        *is_32_bit,
                    ),
                    FieldType::NullablePtrToMemory { id: memory_id, buffer_size, is_32_bit } => {
                        let id = memory_id.prepended(id.clone());
                        (
                            Type::NullOr {
                                id: id.clone(),
                                inner: Box::new(Type::PtrToMemory {
                                    id,
                                    offset: 0.into(),
                                    buffer_size: *buffer_size as u64,
                                }),
                            },
                            *is_32_bit,
                        )
                    }
                };

                // Record the access so the runtime can relocate/validate it.
                context.register_struct_access(StructAccess {
                    pc: self.pc,
                    memory_id: id.clone(),
                    field_offset: field_desc.offset,
                    is_32_bit_ptr_load,
                })?;

                Ok(return_type)
            }
            Type::PtrToArray { ref id, offset } => {
                // The usable size is whatever bound has been proven so far
                // (0 when nothing is known, which rejects the access).
                self.check_memory_access(
                    offset,
                    *self.array_bounds.get(&id).unwrap_or(&0),
                    field.offset,
                    field.width.bytes(),
                )?;
                Ok(Type::UNKNOWN_SCALAR)
            }
            _ => Err("incorrect load".to_string()),
        }
    }
1588
    /// Materializes a callable's declared return type into a concrete `Type`,
    /// resolving parameter references against the current argument registers
    /// (arguments live in R1..).
    ///
    /// `maybe_null` is true while resolving inside a `NullOrParameter`; in
    /// that case releasable resources are not registered in `next` yet — they
    /// only become tracked once proven non-null.
    fn resolve_return_value(
        &self,
        verification_context: &mut VerificationContext<'_>,
        return_value: &Type,
        next: &mut ComputationContext,
        maybe_null: bool,
    ) -> Result<Type, String> {
        match return_value {
            Type::AliasParameter { parameter_index } => self.reg(parameter_index + 1),
            Type::ReleasableParameter { id, inner } => {
                // Give this acquisition a fresh id scoped under the declared one.
                let id = MemoryId::from(verification_context.next_id()).prepended(id.clone());
                if !maybe_null {
                    next.resources.insert(id.clone());
                }
                Ok(Type::Releasable {
                    id,
                    inner: Box::new(self.resolve_return_value(
                        verification_context,
                        inner,
                        next,
                        maybe_null,
                    )?),
                })
            }
            Type::NullOrParameter(t) => {
                let id = verification_context.next_id();
                Ok(Type::NullOr {
                    id: id.into(),
                    inner: Box::new(self.resolve_return_value(
                        verification_context,
                        t,
                        next,
                        true,
                    )?),
                })
            }
            Type::MapValueParameter { map_ptr_index } => {
                // The returned buffer is as large as the map's value size.
                let schema = self.get_map_schema(*map_ptr_index)?;
                let id = verification_context.next_id();
                Ok(Type::PtrToMemory {
                    id: id.into(),
                    offset: 0.into(),
                    buffer_size: schema.value_size as u64,
                })
            }
            Type::MemoryParameter { size, .. } => {
                let buffer_size = size.size(self)?;
                let id = verification_context.next_id();
                Ok(Type::PtrToMemory { id: id.into(), offset: 0.into(), buffer_size })
            }
            // Concrete types pass through unchanged.
            t => Ok(t.clone()),
        }
    }
1648
1649 fn compute_source(&self, src: Source) -> Result<Type, String> {
1650 match src {
1651 Source::Reg(reg) => self.reg(reg),
1652 Source::Value(v) => Ok(v.into()),
1653 }
1654 }
1655
1656 fn apply_computation(
1657 context: &ComputationContext,
1658 op1: Type,
1659 op2: Type,
1660 alu_type: AluType,
1661 op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
1662 ) -> Result<Type, String> {
1663 let result: Type = match (alu_type, op1.inner(context)?, op2.inner(context)?) {
1664 (_, Type::ScalarValue(data1), Type::ScalarValue(data2)) => op(*data1, *data2).into(),
1665 (
1666 AluType::Add,
1667 Type::ScalarValue(_),
1668 Type::PtrToStack { .. } | Type::PtrToMemory { .. } | Type::PtrToStruct { .. },
1669 ) => {
1670 return Self::apply_computation(context, op2, op1, alu_type, op);
1671 }
1672 (alu_type, Type::PtrToStack { offset: x }, Type::ScalarValue(data))
1673 if alu_type.is_ptr_compatible() =>
1674 {
1675 Type::PtrToStack { offset: run_on_stack_offset(*x, |x| op(x, *data)) }
1676 }
1677 (
1678 alu_type,
1679 Type::PtrToMemory { id, offset: x, buffer_size },
1680 Type::ScalarValue(data),
1681 ) if alu_type.is_ptr_compatible() => {
1682 let offset = op(*x, *data);
1683 Type::PtrToMemory { id: id.clone(), offset, buffer_size: *buffer_size }
1684 }
1685 (
1686 alu_type,
1687 Type::PtrToStruct { id, offset: x, descriptor },
1688 Type::ScalarValue(data),
1689 ) if alu_type.is_ptr_compatible() => {
1690 let offset = op(*x, *data);
1691 Type::PtrToStruct { id: id.clone(), offset, descriptor: descriptor.clone() }
1692 }
1693 (AluType::Add, Type::PtrToArray { id, offset: x }, Type::ScalarValue(data)) => {
1694 let offset = x.checked_add(*data).ok_or_else(|| format!("XXX"))?;
1695 Type::PtrToArray { id: id.clone(), offset }
1696 }
1697 (AluType::Sub, Type::PtrToArray { id, offset: x }, Type::ScalarValue(data)) => {
1698 let offset = x.checked_sub(*data).ok_or_else(|| format!("XXX"))?;
1699 Type::PtrToArray { id: id.clone(), offset }
1700 }
1701 (
1702 AluType::Sub,
1703 Type::PtrToMemory { id: id1, offset: x1, .. },
1704 Type::PtrToMemory { id: id2, offset: x2, .. },
1705 )
1706 | (
1707 AluType::Sub,
1708 Type::PtrToStruct { id: id1, offset: x1, .. },
1709 Type::PtrToStruct { id: id2, offset: x2, .. },
1710 )
1711 | (
1712 AluType::Sub,
1713 Type::PtrToArray { id: id1, offset: x1 },
1714 Type::PtrToArray { id: id2, offset: x2 },
1715 ) if id1 == id2 => Type::from(op(*x1, *x2)),
1716 (AluType::Sub, Type::PtrToStack { offset: x1 }, Type::PtrToStack { offset: x2 }) => {
1717 Type::from(op(x1.reg(), x2.reg()))
1718 }
1719 (
1720 AluType::Sub,
1721 Type::PtrToArray { id: id1, .. },
1722 Type::PtrToEndArray { id: id2, .. },
1723 )
1724 | (
1725 AluType::Sub,
1726 Type::PtrToEndArray { id: id1, .. },
1727 Type::PtrToArray { id: id2, .. },
1728 ) if id1 == id2 => Type::UNKNOWN_SCALAR,
1729 _ => Type::default(),
1730 };
1731 Ok(result)
1732 }
1733
    /// Verifies an ALU instruction `dst = dst ⊕ src`, logging it when
    /// `op_name` is provided, and pushes the fall-through successor state with
    /// the computed result in `dst`.
    fn alu(
        &mut self,
        op_name: Option<&str>,
        verification_context: &mut VerificationContext<'_>,
        dst: Register,
        src: Source,
        alu_type: AluType,
        op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
    ) -> Result<(), String> {
        if let Some(op_name) = op_name {
            bpf_log!(
                self,
                verification_context,
                "{op_name} {}, {}",
                display_register(dst),
                display_source(src)
            );
        }
        let op1 = self.reg(dst)?;
        let op2 = self.compute_source(src)?;
        let result = Self::apply_computation(self, op1, op2, alu_type, op)?;
        let mut next = self.next()?;
        next.set_reg(dst, result)?;
        verification_context.states.push(next);
        Ok(())
    }
1760
    /// Logs an atomic instruction in `lock [fetch ]op [dst+off], src` form.
    fn log_atomic_operation(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) {
        bpf_log!(
            self,
            verification_context,
            "lock {}{} [{}{}], {}",
            if fetch { "fetch " } else { "" },
            op_name,
            display_register(dst),
            print_offset(offset),
            display_register(src),
        );
    }
1781
    /// Common verification for atomic read-modify-write instructions: loads
    /// the current value at `[dst + offset]`, combines it with `src` through
    /// `op`, stores the result back, and — for `fetch` variants — writes the
    /// previously-loaded value into `src`.
    fn raw_atomic_operation(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        width: DataWidth,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
        op: impl FnOnce(&ComputationContext, Type, Type) -> Result<Type, String>,
    ) -> Result<(), String> {
        self.log_atomic_operation(op_name, verification_context, fetch, dst, offset, src);
        let addr = self.reg(dst)?;
        let value = self.reg(src)?;
        let field = Field::new(offset, width);
        let loaded_type = self.load_memory(verification_context, &addr, field)?;
        let result = op(self, loaded_type.clone(), value)?;
        let mut next = self.next()?;
        next.store_memory(verification_context, &addr, field, result)?;
        if fetch {
            // The fetch variant returns the old value in `src`.
            next.set_reg(src, loaded_type)?;
        }
        verification_context.states.push(next);
        Ok(())
    }
1807
1808 fn atomic_operation(
1809 &mut self,
1810 op_name: &str,
1811 verification_context: &mut VerificationContext<'_>,
1812 width: DataWidth,
1813 fetch: bool,
1814 dst: Register,
1815 offset: i16,
1816 src: Register,
1817 alu_type: AluType,
1818 op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
1819 ) -> Result<(), String> {
1820 self.raw_atomic_operation(
1821 op_name,
1822 verification_context,
1823 width,
1824 fetch,
1825 dst,
1826 offset,
1827 src,
1828 |context: &ComputationContext, v1: Type, v2: Type| {
1829 Self::apply_computation(context, v1, v2, alu_type, op)
1830 },
1831 )
1832 }
1833
    /// Verifies a cmpxchg-style atomic: the value at `[dst + offset]` is
    /// compared with R0 and, when equal, `src` is stored there; R0 always
    /// receives the loaded value. Both outcomes are explored whenever the
    /// comparison cannot be decided statically.
    fn raw_atomic_cmpxchg(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        dst: Register,
        offset: i16,
        src: Register,
        jump_width: JumpWidth,
        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
    ) -> Result<(), String> {
        self.log_atomic_operation(op_name, verification_context, true, dst, offset, src);
        let width = match jump_width {
            JumpWidth::W32 => DataWidth::U32,
            JumpWidth::W64 => DataWidth::U64,
        };
        let addr = self.reg(dst)?;
        let field = Field::new(offset, width);
        let dst = self.load_memory(verification_context, &addr, field)?;
        let value = self.reg(src)?;
        let r0 = self.reg(0)?;
        let branch = self.compute_branch(jump_width, &dst, &r0, op)?;
        if branch.unwrap_or(true) {
            // Equal case: `src` is stored; R0 gets the loaded value narrowed
            // by the equality constraint.
            let mut next = self.next()?;
            let (dst, r0) =
                Type::constraint(&mut next, JumpType::Eq, jump_width, dst.clone(), r0.clone())?;
            next.set_reg(0, dst)?;
            next.store_memory(verification_context, &addr, field, value)?;
            verification_context.states.push(next);
        }
        if !branch.unwrap_or(false) {
            // Not-equal case: memory keeps the loaded value; R0 receives it.
            let mut next = self.next()?;
            let (dst, r0) = Type::constraint(&mut next, JumpType::Ne, jump_width, dst, r0)?;
            next.set_reg(0, dst.clone())?;
            next.store_memory(verification_context, &addr, field, dst)?;
            verification_context.states.push(next);
        }

        Ok(())
    }
    /// Verifies a byte-order conversion (`be`/`le`) of `width` bits on `dst`.
    ///
    /// The value and both bookkeeping masks are byte-swapped through `BO`;
    /// the value range is widened to the full range since a byte swap does
    /// not preserve numeric ordering.
    fn endianness<BO: ByteOrder>(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        bpf_log!(self, verification_context, "{op_name}{} {}", width.bits(), display_register(dst),);
        let bit_op = |value: u64| match width {
            DataWidth::U16 => BO::read_u16((value as u16).as_bytes()) as u64,
            DataWidth::U32 => BO::read_u32((value as u32).as_bytes()) as u64,
            DataWidth::U64 => BO::read_u64(value.as_bytes()),
            _ => {
                panic!("Unexpected bit width for endianness operation");
            }
        };
        let value = self.reg(dst)?;
        let new_value = match value {
            Type::ScalarValue(data) => Type::ScalarValue(ScalarValueData::new(
                bit_op(data.value),
                bit_op(data.unknown_mask),
                bit_op(data.unwritten_mask),
                U64Range::max(),
            )),
            // Byte-swapping a non-scalar loses all type information.
            _ => Type::default(),
        };
        let mut next = self.next()?;
        next.set_reg(dst, new_value)?;
        verification_context.states.push(next);
        Ok(())
    }
1906
    /// Statically evaluates a branch condition between `op1` and `op2`.
    ///
    /// Returns `Ok(Some(b))` when the branch outcome is decided, `Ok(None)`
    /// when both outcomes are possible, and an error when the two operand
    /// types may not legally be compared.
    fn compute_branch(
        &self,
        jump_width: JumpWidth,
        op1: &Type,
        op2: &Type,
        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
    ) -> Result<Option<bool>, String> {
        match (jump_width, op1, op2) {
            (_, Type::ScalarValue(data1), Type::ScalarValue(data2)) => op(*data1, *data2),
            // A nullable pointer compared against 0: undecidable by design —
            // this is how null checks spawn both branches.
            (JumpWidth::W64, Type::ScalarValue(data), Type::NullOr { .. })
            | (JumpWidth::W64, Type::NullOr { .. }, Type::ScalarValue(data))
                if data.is_zero() =>
            {
                Ok(None)
            }

            // A known non-zero value compared against 0 reduces to comparing
            // the constants 1 and 0.
            (JumpWidth::W64, Type::ScalarValue(data), t) if data.is_zero() && t.is_non_zero() => {
                op(1.into(), 0.into())
            }

            (JumpWidth::W64, t, Type::ScalarValue(data)) if data.is_zero() && t.is_non_zero() => {
                op(0.into(), 1.into())
            }

            // Pointers into the same region compare through their offsets.
            (JumpWidth::W64, Type::PtrToStack { offset: x }, Type::PtrToStack { offset: y }) => {
                op(x.reg(), y.reg())
            }

            (
                JumpWidth::W64,
                Type::PtrToMemory { id: id1, offset: x, .. },
                Type::PtrToMemory { id: id2, offset: y, .. },
            )
            | (
                JumpWidth::W64,
                Type::PtrToStruct { id: id1, offset: x, .. },
                Type::PtrToStruct { id: id2, offset: y, .. },
            )
            | (
                JumpWidth::W64,
                Type::PtrToArray { id: id1, offset: x, .. },
                Type::PtrToArray { id: id2, offset: y, .. },
            ) if *id1 == *id2 => op(*x, *y),

            // An array cursor against its end marker is always undecidable —
            // this is how bounds-check loops spawn both branches.
            (JumpWidth::W64, Type::PtrToArray { id: id1, .. }, Type::PtrToEndArray { id: id2 })
            | (JumpWidth::W64, Type::PtrToEndArray { id: id1 }, Type::PtrToArray { id: id2, .. })
                if *id1 == *id2 =>
            {
                Ok(None)
            }

            _ => Err(()),
        }
        .map_err(|_| "non permitted comparison".to_string())
    }
1962
1963 fn conditional_jump(
1964 &mut self,
1965 op_name: &str,
1966 verification_context: &mut VerificationContext<'_>,
1967 dst: Register,
1968 src: Source,
1969 offset: i16,
1970 jump_type: JumpType,
1971 jump_width: JumpWidth,
1972 op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
1973 ) -> Result<(), String> {
1974 bpf_log!(
1975 self,
1976 verification_context,
1977 "{op_name} {}, {}, {}",
1978 display_register(dst),
1979 display_source(src),
1980 if offset == 0 { format!("0") } else { print_offset(offset) },
1981 );
1982 let op1 = self.reg(dst)?;
1983 let op2 = self.compute_source(src.clone())?;
1984 let apply_constraints_and_register = |mut next: Self,
1985 jump_type: JumpType|
1986 -> Result<Self, String> {
1987 if jump_type != JumpType::Unknown {
1988 let (new_op1, new_op2) =
1989 Type::constraint(&mut next, jump_type, jump_width, op1.clone(), op2.clone())?;
1990 if dst < REGISTER_COUNT {
1991 next.set_reg(dst, new_op1)?;
1992 }
1993 match src {
1994 Source::Reg(r) => {
1995 next.set_reg(r, new_op2)?;
1996 }
1997 _ => {
1998 }
2000 }
2001 }
2002 Ok(next)
2003 };
2004 let branch = self.compute_branch(jump_width, &op1, &op2, op)?;
2005 let parent = Some(Arc::new(self.clone()));
2006 if branch.unwrap_or(true) {
2007 verification_context.states.push(apply_constraints_and_register(
2009 self.jump_with_offset(offset, parent.clone())?,
2010 jump_type,
2011 )?);
2012 }
2013 if !branch.unwrap_or(false) {
2014 verification_context.states.push(apply_constraints_and_register(
2016 self.jump_with_offset(0, parent)?,
2017 jump_type.invert(),
2018 )?);
2019 }
2020 Ok(())
2021 }
2022
2023 fn terminate(self, verification_context: &mut VerificationContext<'_>) -> Result<(), String> {
2039 let mut next = Some(self);
2040 while let Some(mut current) = next.take() {
2042 let parent = current.parent.take();
2045
2046 let mut dependencies = DataDependencies::default();
2049 for dependency in current.dependencies.get_mut().iter() {
2050 dependencies.merge(dependency);
2051 }
2052
2053 dependencies.visit(
2054 &mut DataDependenciesVisitorContext {
2055 calling_context: &verification_context.calling_context,
2056 computation_context: ¤t,
2057 },
2058 verification_context.code[current.pc],
2059 )?;
2060
2061 for register in 0..GENERAL_REGISTER_COUNT {
2063 if !dependencies.registers.contains(®ister) {
2064 current.set_reg(register, Default::default())?;
2065 }
2066 }
2067 current.stack.data.retain(|k, _| dependencies.stack.contains(k));
2068
2069 let terminating_contexts =
2071 verification_context.terminating_contexts.entry(current.pc).or_default();
2072 let mut is_dominated = false;
2073 terminating_contexts.retain(|c| match c.computation_context.partial_cmp(¤t) {
2074 Some(Ordering::Less) => false,
2075 Some(Ordering::Equal) | Some(Ordering::Greater) => {
2076 is_dominated = true;
2079 true
2080 }
2081 _ => true,
2082 });
2083 if !is_dominated {
2084 terminating_contexts.push(TerminatingContext {
2085 computation_context: current,
2086 dependencies: dependencies.clone(),
2087 });
2088 }
2089
2090 if let Some(parent) = parent {
2093 parent.dependencies.lock().push(dependencies);
2094 next = Arc::into_inner(parent);
2097 }
2098 }
2099 Ok(())
2100 }
2101}
2102
2103impl Drop for ComputationContext {
2104 fn drop(&mut self) {
2105 let mut next = self.parent.take().and_then(Arc::into_inner);
2106 while let Some(mut current) = next {
2108 next = current.parent.take().and_then(Arc::into_inner);
2109 }
2110 }
2111}
2112
/// Domination order between contexts, used by `terminate` to discard states
/// subsumed by an already-recorded one.
impl PartialOrd for ComputationContext {
    /// Contexts at different pcs or with different held resources are
    /// incomparable. Otherwise the stack ordering, the register-list ordering
    /// and the array-bound orderings are folded with `associate_orderings`
    /// (which yields `None` on any conflict). Array bounds are merge-walked by
    /// key; note a missing entry orders the side *lacking* it as `Greater`,
    /// and a larger proven bound orders its side as `Less` (i.e. `Greater`
    /// means fewer/weaker proven constraints).
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        if self.pc != other.pc || self.resources != other.resources {
            return None;
        }
        let mut result = self.stack.partial_cmp(&other.stack)?;
        result = associate_orderings(
            result,
            Type::compare_list(self.registers.iter(), other.registers.iter())?,
        )?;
        let mut array_bound_iter1 = self.array_bounds.iter().peekable();
        let mut array_bound_iter2 = other.array_bounds.iter().peekable();
        let result = loop {
            match (array_bound_iter1.peek().cloned(), array_bound_iter2.peek().cloned()) {
                (None, None) => break result,
                (None, _) => break associate_orderings(result, Ordering::Greater)?,
                (_, None) => break associate_orderings(result, Ordering::Less)?,
                (Some((k1, v1)), Some((k2, v2))) => match k1.cmp(k2) {
                    Ordering::Equal => {
                        array_bound_iter1.next();
                        array_bound_iter2.next();
                        // Reversed on purpose: the larger bound is "Less".
                        result = associate_orderings(result, v2.cmp(v1))?;
                    }
                    v @ Ordering::Less => {
                        array_bound_iter1.next();
                        result = associate_orderings(result, v)?;
                    }
                    v @ Ordering::Greater => {
                        array_bound_iter2.next();
                        result = associate_orderings(result, v)?;
                    }
                },
            }
        };
        Some(result)
    }
}
2152
/// The set of values whose content is still observable by the rest of the
/// program, computed backwards from terminating instructions.
#[derive(Clone, Debug, Default)]
struct DataDependencies {
    /// Registers whose current value is read before being overwritten.
    registers: HashSet<Register>,
    /// Stack slots (by array index) whose current value is read later.
    stack: HashSet<usize>,
}
2168
2169impl DataDependencies {
2170 fn merge(&mut self, other: &DataDependencies) {
2171 self.registers.extend(other.registers.iter());
2172 self.stack.extend(other.stack.iter());
2173 }
2174
2175 fn alu(&mut self, dst: Register, src: Source) -> Result<(), String> {
2176 if self.registers.contains(&dst) {
2178 if let Source::Reg(src) = src {
2179 self.registers.insert(src);
2180 }
2181 }
2182 Ok(())
2183 }
2184
2185 fn jmp(&mut self, dst: Register, src: Source) -> Result<(), String> {
2186 self.registers.insert(dst);
2187 if let Source::Reg(src) = src {
2188 self.registers.insert(src);
2189 }
2190 Ok(())
2191 }
2192
    /// Dependency rule for atomic instructions.
    ///
    /// The address register `dst` is always read. R0 (cmpxchg comparison) and
    /// `src` become dependencies only when the operation's result is
    /// observable; when the address is a stack pointer, the touched slot is
    /// both read and written.
    fn atomic(
        &mut self,
        context: &ComputationContext,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
        width: DataWidth,
        is_cmpxchg: bool,
    ) -> Result<(), String> {
        // The write-back is observable when a cmpxchg compares against a
        // tracked R0, or a fetch writes into a tracked `src`.
        let mut is_read = false;
        if is_cmpxchg && self.registers.contains(&0) {
            is_read = true;
        }
        if fetch && self.registers.contains(&src) {
            is_read = true;
        }
        let addr = context.reg(dst)?;
        if let Type::PtrToStack { offset: stack_offset } = addr {
            let stack_offset = stack_offset.add(offset);
            if !stack_offset.is_valid() {
                return Err(format!("Invalid stack offset at {}", context.pc));
            }
            // A tracked slot is read (old value) as well as written.
            if is_read || self.stack.contains(&stack_offset.array_index()) {
                is_read = true;
                self.stack.insert(stack_offset.array_index());
            }
        }
        if is_read {
            self.registers.insert(0);
            self.registers.insert(src);
        }
        self.registers.insert(dst);
        Ok(())
    }
2228}
2229
/// Read-only state handed to the `BpfVisitor` implementation of
/// `DataDependencies` while revisiting an instruction.
struct DataDependenciesVisitorContext<'a> {
    /// Static call information (e.g. the registered helper functions).
    calling_context: &'a CallingContext,
    /// The verification state at the visited instruction.
    computation_context: &'a ComputationContext,
}
2234
2235impl BpfVisitor for DataDependencies {
2236 type Context<'a> = DataDependenciesVisitorContext<'a>;
2237
    // Every ALU instruction (32- and 64-bit variants alike) reads `dst` and,
    // when the source is a register, `src`, and writes only `dst`; the shared
    // backward dependency rule lives in `Self::alu`.
    fn add<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn add64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn and<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn and64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn arsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn arsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn div<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn div64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn lsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn lsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn r#mod<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn mod64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn mul<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn mul64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn or<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn or64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn rsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn rsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn sub<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn sub64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn xor<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn xor64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
2414
    /// `mov dst, src` overwrites `dst`: if `dst` was a tracked dependency it
    /// stops being one, and the dependency transfers to a register source.
    fn mov<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        // A self-move changes nothing; an untracked destination contributes
        // no new dependency.
        if src == Source::Reg(dst) || !self.registers.contains(&dst) {
            return Ok(());
        }
        if let Source::Reg(src) = src {
            self.registers.insert(src);
        }
        self.registers.remove(&dst);
        Ok(())
    }
    fn mov64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.mov(context, dst, src)
    }
2438
    // `neg`, `neg64`, `be` and `le` read and write only `dst` in place, so
    // the tracked dependency set is left untouched.
    fn neg<'a>(&mut self, _context: &mut Self::Context<'a>, _dst: Register) -> Result<(), String> {
        Ok(())
    }
    fn neg64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        _dst: Register,
    ) -> Result<(), String> {
        Ok(())
    }

    fn be<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        _dst: Register,
        _width: DataWidth,
    ) -> Result<(), String> {
        Ok(())
    }
    fn le<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        _dst: Register,
        _width: DataWidth,
    ) -> Result<(), String> {
        Ok(())
    }
2470
    /// Dependency rule for external calls: registers R0..Rn (return value and
    /// argument registers) are written by the call, and the argument
    /// registers R1..Rn are read by it.
    fn call_external<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        index: u32,
    ) -> Result<(), String> {
        let Some(helper) = context.calling_context.helpers.get(&index).cloned() else {
            return Err(format!("unknown external function {}", index));
        };
        // Registers the call writes no longer depend on their prior value.
        for register in 0..helper.signature.args.len() + 1 {
            self.registers.remove(&(register as Register));
        }
        // The arguments are consumed by the call.
        for register in 0..helper.signature.args.len() {
            self.registers.insert((register + 1) as Register);
        }
        Ok(())
    }
2489
    // `exit` reads r0: the program's return value is always a dependency.
    fn exit<'a>(&mut self, _context: &mut Self::Context<'a>) -> Result<(), String> {
        self.registers.insert(0);
        Ok(())
    }

    // An unconditional jump touches no register or stack slot.
    fn jump<'a>(&mut self, _context: &mut Self::Context<'a>, _offset: i16) -> Result<(), String> {
        Ok(())
    }
2500
    // All conditional jumps read both operands and write nothing; they share
    // the dependency behavior recorded by `self.jmp(dst, src)`. The offset is
    // irrelevant for this pass.
    fn jeq<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jeq64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jne<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jne64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jge<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jge64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jgt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jgt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jle<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jle64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jlt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jlt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsge<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsge64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsgt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsgt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsle<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsle64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jslt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jslt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jset<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jset64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
2699
    // Atomic read-modify-write instructions all share one dependency model,
    // implemented by `self.atomic(context, fetch, dst, offset, src, width,
    // is_cmpxchg)`; the last flag is `true` only for compare-exchange, which
    // additionally involves r0 and always fetches.
    fn atomic_add<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_add64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    fn atomic_and<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_and64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    fn atomic_or<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_or64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    fn atomic_xor<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_xor64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    fn atomic_xchg<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_xchg64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    // Compare-exchange always fetches, hence the hard-coded `true`.
    fn atomic_cmpxchg<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, true, dst, offset, src, DataWidth::U32, true)
    }

    fn atomic_cmpxchg64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, true, dst, offset, src, DataWidth::U64, true)
    }
2829
    // Load `dst = *(src + offset)`. When `dst` is tracked and the address is
    // a stack pointer, the stack slot that is read becomes tracked too.
    fn load<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        let context = &context.computation_context;
        if self.registers.contains(&dst) {
            let addr = context.reg(src)?;
            if let Type::PtrToStack { offset: stack_offset } = addr {
                let stack_offset = stack_offset.add(offset);
                if !stack_offset.is_valid() {
                    return Err(format!("Invalid stack offset at {}", context.pc));
                }
                // The loaded slot now feeds a tracked register.
                self.stack.insert(stack_offset.array_index());
            }
        }
        // The address register is marked as a dependency unconditionally, and
        // `dst` is never removed here. NOTE(review): presumably conservative —
        // the pointer must stay valid even for loads into untracked registers;
        // confirm intent (contrast with `load_from_packet`, which removes
        // `dst` and only then inserts `src`).
        self.registers.insert(src);
        Ok(())
    }
2852
    // `lddw`-style load of a 64-bit immediate: `dst` is fully overwritten and
    // no register is read, so `dst`'s previous value stops being a dependency.
    fn load64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        _src: u8,
        _lower: u32,
    ) -> Result<(), String> {
        self.registers.remove(&dst);
        Ok(())
    }
2863
    // Legacy packet load. r1..=r5 are clobbered by the instruction, so their
    // previous values are dropped from the dependency set; if `dst` was
    // tracked, the packet pointer (`src`) and the optional index register
    // become dependencies.
    fn load_from_packet<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Register,
        _offset: i32,
        register_offset: Option<Register>,
        _width: DataWidth,
    ) -> Result<(), String> {
        // Registers 1 through 5 (range end is exclusive) are clobbered.
        for register in 1..6 {
            self.registers.remove(&(register as Register));
        }
        if self.registers.remove(&dst) {
            self.registers.insert(src);
            if let Some(reg) = register_offset {
                self.registers.insert(reg);
            }
        }
        Ok(())
    }
2886
    // Store `*(dst + offset) = src`.
    fn store<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Source,
        width: DataWidth,
    ) -> Result<(), String> {
        let context = &context.computation_context;
        let addr = context.reg(dst)?;
        if let Type::PtrToStack { offset: stack_offset } = addr {
            let stack_offset = stack_offset.add(offset);
            if !stack_offset.is_valid() {
                return Err(format!("Invalid stack offset at {}", context.pc));
            }
            // Writing the slot kills it; if it was tracked, the stored value
            // (and hence `src`) is what made it tracked.
            // NOTE(review): the slot is killed regardless of `width`; a
            // partial-width store arguably should not clear the whole slot —
            // confirm the slot granularity assumed by `array_index()`.
            if self.stack.remove(&stack_offset.array_index()) {
                if let Source::Reg(src) = src {
                    self.registers.insert(src);
                }
            }
        } else {
            // Stores through non-stack pointers always consume `src`.
            if let Source::Reg(src) = src {
                self.registers.insert(src);
            }
        }

        Ok(())
    }
2915}
2916
/// A computation state at a terminating point of the program, paired with the
/// data dependencies associated with it.
#[derive(Debug)]
struct TerminatingContext {
    // The abstract machine state (registers, stack, pc) at termination.
    computation_context: ComputationContext,
    // The registers and stack slots this state depends on.
    dependencies: DataDependencies,
}
2922
/// Classification of an ALU operation for pointer-arithmetic purposes.
/// `Add` and `Sub` are distinguished because they are the only operations the
/// verifier accepts on pointer operands (see `is_ptr_compatible`).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum AluType {
    // Scalar-only operation.
    Plain,
    // Subtraction, possibly involving a pointer.
    Sub,
    // Addition, possibly involving a pointer.
    Add,
}
2929
2930impl AluType {
2931 fn is_ptr_compatible(&self) -> bool {
2933 match self {
2934 Self::Sub | Self::Add => true,
2935 _ => false,
2936 }
2937 }
2938}
2939
/// Operand width of a conditional jump: 32-bit (`jXX32`) or 64-bit forms.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum JumpWidth {
    W32,
    W64,
}
2945
/// The comparison performed by a conditional jump. Unsigned comparisons are
/// represented precisely (`Eq`, `Ge`, `Gt`, `Le`, `Lt`, `Ne`); signed
/// comparisons are only classified as loose (`>=`, `<=`) or strict
/// (`>`, `<`), and `Unknown` is used for `jset` (bitwise test).
/// NOTE(review): "Comparaison" is a misspelling of "Comparison"; it is kept
/// here because renaming the variants would touch every use site in the file.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum JumpType {
    Eq,
    Ge,
    Gt,
    Le,
    LooseComparaison,
    Lt,
    Ne,
    StrictComparaison,
    Unknown,
}
2958
2959impl JumpType {
2960 fn invert(&self) -> Self {
2961 match self {
2962 Self::Eq => Self::Ne,
2963 Self::Ge => Self::Lt,
2964 Self::Gt => Self::Le,
2965 Self::Le => Self::Gt,
2966 Self::LooseComparaison => Self::StrictComparaison,
2967 Self::Lt => Self::Ge,
2968 Self::Ne => Self::Eq,
2969 Self::StrictComparaison => Self::LooseComparaison,
2970 Self::Unknown => Self::Unknown,
2971 }
2972 }
2973
2974 fn is_strict(&self) -> bool {
2975 match self {
2976 Self::Gt | Self::Lt | Self::Ne | Self::StrictComparaison => true,
2977 _ => false,
2978 }
2979 }
2980}
2981
2982fn display_register(register: Register) -> String {
2983 format!("%r{register}")
2984}
2985
2986fn display_source(src: Source) -> String {
2987 match src {
2988 Source::Reg(r) => display_register(r),
2989 Source::Value(v) => format!("0x{v:x}"),
2990 }
2991}
2992
2993impl BpfVisitor for ComputationContext {
2994 type Context<'a> = VerificationContext<'a>;
2995
    // Each ALU visitor delegates to `self.alu` with: an instruction name for
    // logging, the pointer-arithmetic classification (`AluType`), and a
    // closure computing the operation on abstract scalar values.
    // `alu32(x, y, f)` applies `f` in 32-bit mode.
    fn add<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("add32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x + y))
    }
    // 64-bit add is classified `AluType::Add`: it may apply to a pointer.
    fn add64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("add"), context, dst, src, AluType::Add, |x, y| x + y)
    }
    fn and<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("and32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x & y))
    }
    fn and64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("and"), context, dst, src, AluType::Plain, |x, y| x & y)
    }
    // Arithmetic (sign-extending) right shift.
    fn arsh<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("arsh32"), context, dst, src, AluType::Plain, |x, y| {
            alu32(x, y, |x, y| x.ashr(y))
        })
    }
    fn arsh64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("arsh"), context, dst, src, AluType::Plain, |x, y| x.ashr(y))
    }
    // NOTE(review): the `/` and `%` closures rely on the `Div`/`Rem` impls of
    // the abstract scalar type to implement eBPF's division-by-zero
    // semantics — confirm in `scalar_value.rs`.
    fn div<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("div32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x / y))
    }
    fn div64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("div"), context, dst, src, AluType::Plain, |x, y| x / y)
    }
    fn lsh<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("lsh32"), context, dst, src, AluType::Plain, |x, y| {
            alu32(x, y, |x, y| x << y)
        })
    }
    fn lsh64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("lsh"), context, dst, src, AluType::Plain, |x, y| x << y)
    }
    // `mod` is a keyword, hence the raw identifier.
    fn r#mod<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("mod32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x % y))
    }
    fn mod64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("mod"), context, dst, src, AluType::Plain, |x, y| x % y)
    }
    // 32-bit move: the source is truncated to its low 32 bits before being
    // written to `dst`; the high 32 bits become the known constant 0.
    fn mov<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        bpf_log!(self, context, "mov32 {}, {}", display_register(dst), display_source(src));
        let src = self.compute_source(src)?;
        let value = match src {
            Type::ScalarValue(data) => {
                // Truncate the value and both uncertainty masks to 32 bits.
                let value = (data.value as u32) as u64;
                let unknown_mask = (data.unknown_mask as u32) as u64;
                let unwritten_mask = (data.unwritten_mask as u32) as u64;
                // NOTE(review): presumably derives the unsigned range of the
                // 4 low bytes of `src` combined with a zero high half —
                // confirm `compute_range_for_bytes_swap` semantics.
                let urange = U64Range::compute_range_for_bytes_swap(0.into(), data.urange, 0, 0, 4);
                Type::ScalarValue(ScalarValueData::new(value, unknown_mask, unwritten_mask, urange))
            }
            // A 32-bit move of a non-scalar (e.g. pointer) loses tracking.
            _ => Type::default(),
        };
        let mut next = self.next()?;
        next.set_reg(dst, value)?;
        context.states.push(next);
        Ok(())
    }
    // 64-bit move: the source type (scalar or pointer) is copied verbatim.
    fn mov64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        bpf_log!(self, context, "mov {}, {}", display_register(dst), display_source(src));
        let src = self.compute_source(src)?;
        let mut next = self.next()?;
        next.set_reg(dst, src)?;
        context.states.push(next);
        Ok(())
    }
    // Remaining ALU operations; same delegation pattern as `add` above.
    fn mul<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("mul32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x * y))
    }
    fn mul64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("mul"), context, dst, src, AluType::Plain, |x, y| x * y)
    }
    fn or<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("or32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x | y))
    }
    fn or64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("or"), context, dst, src, AluType::Plain, |x, y| x | y)
    }
    fn rsh<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("rsh32"), context, dst, src, AluType::Plain, |x, y| {
            alu32(x, y, |x, y| x >> y)
        })
    }
    fn rsh64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("rsh"), context, dst, src, AluType::Plain, |x, y| x >> y)
    }
    fn sub<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("sub32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x - y))
    }
    // 64-bit sub is classified `AluType::Sub`: it may apply to pointers
    // (pointer - scalar, or pointer - pointer of the same region).
    fn sub64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("sub"), context, dst, src, AluType::Sub, |x, y| x - y)
    }
    fn xor<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("xor32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x ^ y))
    }
    fn xor64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("xor"), context, dst, src, AluType::Plain, |x, y| x ^ y)
    }
3214
    // Negation is modeled as an ALU operation with a dummy zero operand; the
    // closure ignores the second argument.
    fn neg<'a>(&mut self, context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
        bpf_log!(self, context, "neg32 {}", display_register(dst));
        self.alu(None, context, dst, Source::Value(0), AluType::Plain, |x, y| {
            alu32(x, y, |x, _y| -x)
        })
    }
    fn neg64<'a>(&mut self, context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
        bpf_log!(self, context, "neg {}", display_register(dst));
        self.alu(None, context, dst, Source::Value(0), AluType::Plain, |x, _y| -x)
    }

    // Byte-order conversions share one implementation, parameterized by the
    // `byteorder` endianness marker type.
    fn be<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        self.endianness::<BigEndian>("be", context, dst, width)
    }

    fn le<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        self.endianness::<LittleEndian>("le", context, dst, width)
    }
3243
    // External helper call: verify each argument register against the
    // helper's declared signature, then model the call's effect on the state.
    fn call_external<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        index: u32,
    ) -> Result<(), String> {
        bpf_log!(self, context, "call 0x{:x}", index);
        let Some(helper) = context.calling_context.helpers.get(&index).cloned() else {
            return Err(format!("unknown external function {}", index));
        };
        let HelperDefinition { signature, name, .. } = helper;
        // eBPF helpers take at most 5 arguments (r1..=r5).
        debug_assert!(signature.args.len() <= 5);
        let mut next = self.next()?;
        for (arg_index, arg) in signature.args.iter().enumerate() {
            // Arguments are passed in r1..=r5.
            let reg = (arg_index + 1) as u8;
            self.reg(reg)?.match_parameter_type(context, self, name, arg, arg_index, &mut next)?
        }
        if signature.invalidate_array_bounds {
            // This helper invalidates any previously-established array bounds.
            next.array_bounds.clear();
        }
        // r0 receives the helper's return value, typed per the signature.
        let value =
            self.resolve_return_value(context, &signature.return_value, &mut next, false)?;
        next.set_reg(0, value)?;
        // r1..=r5 are caller-saved: scratch them after the call.
        for i in 1..=5 {
            next.set_reg(i, Type::default())?;
        }
        context.states.push(next);
        Ok(())
    }
3273
    fn exit<'a>(&mut self, context: &mut Self::Context<'a>) -> Result<(), String> {
        bpf_log!(self, context, "exit");
        // r0 must hold a fully-written scalar: it is the program return value.
        if !self.reg(0)?.is_written_scalar() {
            return Err("register 0 is incorrect at exit time".to_string());
        }
        // All tracked resources must have been released before exiting.
        if !self.resources.is_empty() {
            return Err("some resources have not been released at exit time".to_string());
        }
        // Record this state as a terminating state of the program.
        let this = self.clone();
        this.terminate(context)?;
        Ok(())
    }

    fn jump<'a>(&mut self, context: &mut Self::Context<'a>, offset: i16) -> Result<(), String> {
        bpf_log!(self, context, "ja {}", offset);
        // Unconditional jump: queue the single successor state, keeping the
        // current state as its parent.
        let parent = Some(Arc::new(self.clone()));
        context.states.push(self.jump_with_offset(offset, parent)?);
        Ok(())
    }
3294
    // Conditional jumps delegate to `conditional_jump` with a closure that
    // evaluates the condition on the operands' unsigned value ranges:
    // Some(true)/Some(false) when the outcome is statically decided, None
    // when both outcomes remain possible.
    fn jeq<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jeq32",
            context,
            dst,
            src,
            offset,
            JumpType::Eq,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    // Equal iff both ranges are the same singleton.
                    if x.min == x.max && x.min == y.min && x.min == y.max {
                        return Some(true);
                    }
                    // Unequal iff the ranges are disjoint.
                    if x.max < y.min || y.max < x.min {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jeq64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jeq",
            context,
            dst,
            src,
            offset,
            JumpType::Eq,
            JumpWidth::W64,
            |x, y| {
                comp64(x, y, |x, y| {
                    if x.min == x.max && x.min == y.min && x.min == y.max {
                        return Some(true);
                    }
                    if x.max < y.min || y.max < x.min {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    // jne is the exact negation of jeq.
    fn jne<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jne32",
            context,
            dst,
            src,
            offset,
            JumpType::Ne,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    if x.min == x.max && x.min == y.min && x.min == y.max {
                        return Some(false);
                    }
                    if x.max < y.min || y.max < x.min {
                        return Some(true);
                    }
                    None
                })
            },
        )
    }
    fn jne64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jne",
            context,
            dst,
            src,
            offset,
            JumpType::Ne,
            JumpWidth::W64,
            |x, y| {
                comp64(x, y, |x, y| {
                    if x.min == x.max && x.min == y.min && x.min == y.max {
                        return Some(false);
                    }
                    if x.max < y.min || y.max < x.min {
                        return Some(true);
                    }
                    None
                })
            },
        )
    }
    // Unsigned ordered jumps. Each closure decides `dst <op> src` from the
    // unsigned ranges: Some(true) when every pair of values satisfies the
    // comparison, Some(false) when none can, None otherwise.
    fn jge<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jge32",
            context,
            dst,
            src,
            offset,
            JumpType::Ge,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    // x >= y always holds when even the smallest x reaches
                    // the largest y.
                    if x.min >= y.max {
                        return Some(true);
                    }
                    // x >= y never holds when the largest x is below the
                    // smallest y.
                    if y.min > x.max {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jge64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jge",
            context,
            dst,
            src,
            offset,
            JumpType::Ge,
            JumpWidth::W64,
            |x, y| {
                comp64(x, y, |x, y| {
                    if x.min >= y.max {
                        return Some(true);
                    }
                    if y.min > x.max {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jgt<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jgt32",
            context,
            dst,
            src,
            offset,
            JumpType::Gt,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    if x.min > y.max {
                        return Some(true);
                    }
                    if y.min >= x.max {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jgt64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jgt",
            context,
            dst,
            src,
            offset,
            JumpType::Gt,
            JumpWidth::W64,
            |x, y| {
                comp64(x, y, |x, y| {
                    if x.min > y.max {
                        return Some(true);
                    }
                    if y.min >= x.max {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jle<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jle32",
            context,
            dst,
            src,
            offset,
            JumpType::Le,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    if x.max <= y.min {
                        return Some(true);
                    }
                    if y.max < x.min {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jle64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jle",
            context,
            dst,
            src,
            offset,
            JumpType::Le,
            JumpWidth::W64,
            |x, y| {
                comp64(x, y, |x, y| {
                    if x.max <= y.min {
                        return Some(true);
                    }
                    if y.max < x.min {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jlt<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jlt32",
            context,
            dst,
            src,
            offset,
            JumpType::Lt,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    if x.max < y.min {
                        return Some(true);
                    }
                    if y.max <= x.min {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jlt64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jlt",
            context,
            dst,
            src,
            offset,
            JumpType::Lt,
            JumpWidth::W64,
            |x, y| {
                comp64(x, y, |x, y| {
                    if x.max < y.min {
                        return Some(true);
                    }
                    if y.max <= x.min {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    // Signed comparisons: delegated to `scomp32`/`scomp64` with a plain
    // predicate; the jump is only classified as loose (>=, <=) or strict
    // (>, <) via `JumpType`.
    fn jsge<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jsge32",
            context,
            dst,
            src,
            offset,
            JumpType::LooseComparaison,
            JumpWidth::W32,
            |x, y| scomp32(x, y, |x, y| x >= y),
        )
    }
    fn jsge64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jsge",
            context,
            dst,
            src,
            offset,
            JumpType::LooseComparaison,
            JumpWidth::W64,
            |x, y| scomp64(x, y, |x, y| x >= y),
        )
    }
    fn jsgt<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jsgt32",
            context,
            dst,
            src,
            offset,
            JumpType::StrictComparaison,
            JumpWidth::W32,
            |x, y| scomp32(x, y, |x, y| x > y),
        )
    }
    fn jsgt64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jsgt",
            context,
            dst,
            src,
            offset,
            JumpType::StrictComparaison,
            JumpWidth::W64,
            |x, y| scomp64(x, y, |x, y| x > y),
        )
    }
    fn jsle<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jsle32",
            context,
            dst,
            src,
            offset,
            JumpType::LooseComparaison,
            JumpWidth::W32,
            |x, y| scomp32(x, y, |x, y| x <= y),
        )
    }
    fn jsle64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jsle",
            context,
            dst,
            src,
            offset,
            JumpType::LooseComparaison,
            JumpWidth::W64,
            |x, y| scomp64(x, y, |x, y| x <= y),
        )
    }
    fn jslt<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jslt32",
            context,
            dst,
            src,
            offset,
            JumpType::StrictComparaison,
            JumpWidth::W32,
            |x, y| scomp32(x, y, |x, y| x < y),
        )
    }
    fn jslt64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jslt",
            context,
            dst,
            src,
            offset,
            JumpType::StrictComparaison,
            JumpWidth::W64,
            |x, y| scomp64(x, y, |x, y| x < y),
        )
    }
    // jset (`dst & src != 0`): the bitwise test is only decidable from the
    // ranges when both operands are exact constants (singleton ranges).
    fn jset<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jset32",
            context,
            dst,
            src,
            offset,
            JumpType::Unknown,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    if x.min != x.max || y.min != y.max {
                        return None;
                    }
                    Some(x.min & y.min != 0)
                })
            },
        )
    }
    fn jset64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jset",
            context,
            dst,
            src,
            offset,
            JumpType::Unknown,
            JumpWidth::W64,
            |x, y| {
                comp64(x, y, |x, y| {
                    if x.min != x.max || y.min != y.max {
                        return None;
                    }
                    Some(x.min & y.min != 0)
                })
            },
        )
    }
3839
    // Atomic read-modify-write instructions delegate to `atomic_operation`
    // with the logged instruction name, operand width, fetch flag, the
    // pointer-arithmetic classification of the operation, and a closure
    // computing the abstract result (same closures as the plain ALU ops).
    fn atomic_add<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "add32",
            context,
            DataWidth::U32,
            fetch,
            dst,
            offset,
            src,
            AluType::Add,
            |x, y| alu32(x, y, |x, y| x + y),
        )
    }

    fn atomic_add64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "add",
            context,
            DataWidth::U64,
            fetch,
            dst,
            offset,
            src,
            AluType::Add,
            |x, y| x + y,
        )
    }

    fn atomic_and<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "and32",
            context,
            DataWidth::U32,
            fetch,
            dst,
            offset,
            src,
            AluType::Plain,
            |x, y| alu32(x, y, |x, y| x & y),
        )
    }

    fn atomic_and64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "and",
            context,
            DataWidth::U64,
            fetch,
            dst,
            offset,
            src,
            AluType::Plain,
            |x, y| x & y,
        )
    }

    fn atomic_or<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "or32",
            context,
            DataWidth::U32,
            fetch,
            dst,
            offset,
            src,
            AluType::Plain,
            |x, y| alu32(x, y, |x, y| x | y),
        )
    }

    fn atomic_or64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "or",
            context,
            DataWidth::U64,
            fetch,
            dst,
            offset,
            src,
            AluType::Plain,
            |x, y| x | y,
        )
    }

    fn atomic_xor<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "xor32",
            context,
            DataWidth::U32,
            fetch,
            dst,
            offset,
            src,
            AluType::Plain,
            |x, y| alu32(x, y, |x, y| x ^ y),
        )
    }
3986
3987 fn atomic_xor64<'a>(
3988 &mut self,
3989 context: &mut Self::Context<'a>,
3990 fetch: bool,
3991 dst: Register,
3992 offset: i16,
3993 src: Register,
3994 ) -> Result<(), String> {
3995 self.atomic_operation(
3996 "xor",
3997 context,
3998 DataWidth::U64,
3999 fetch,
4000 dst,
4001 offset,
4002 src,
4003 AluType::Plain,
4004 |x, y| x ^ y,
4005 )
4006 }
4007
4008 fn atomic_xchg<'a>(
4009 &mut self,
4010 context: &mut Self::Context<'a>,
4011 fetch: bool,
4012 dst: Register,
4013 offset: i16,
4014 src: Register,
4015 ) -> Result<(), String> {
4016 self.atomic_operation(
4017 "xchg32",
4018 context,
4019 DataWidth::U32,
4020 fetch,
4021 dst,
4022 offset,
4023 src,
4024 AluType::Plain,
4025 |_, x| x,
4026 )
4027 }
4028
4029 fn atomic_xchg64<'a>(
4030 &mut self,
4031 context: &mut Self::Context<'a>,
4032 fetch: bool,
4033 dst: Register,
4034 offset: i16,
4035 src: Register,
4036 ) -> Result<(), String> {
4037 self.raw_atomic_operation(
4038 "xchg",
4039 context,
4040 DataWidth::U64,
4041 fetch,
4042 dst,
4043 offset,
4044 src,
4045 |_, _, x| Ok(x),
4046 )
4047 }
4048
4049 fn atomic_cmpxchg<'a>(
4050 &mut self,
4051 context: &mut Self::Context<'a>,
4052 dst: Register,
4053 offset: i16,
4054 src: Register,
4055 ) -> Result<(), String> {
4056 self.raw_atomic_cmpxchg("cmpxchg32", context, dst, offset, src, JumpWidth::W32, |x, y| {
4057 comp32(x, y, |x, y| {
4058 if x.min == x.max && x.min == y.min && x.min == y.max {
4060 return Some(true);
4061 }
4062 if x.max < y.min || y.max < x.min {
4063 return Some(false);
4064 }
4065 None
4066 })
4067 })
4068 }
4069
4070 fn atomic_cmpxchg64<'a>(
4071 &mut self,
4072 context: &mut Self::Context<'a>,
4073 dst: Register,
4074 offset: i16,
4075 src: Register,
4076 ) -> Result<(), String> {
4077 self.raw_atomic_cmpxchg("cmpxchg", context, dst, offset, src, JumpWidth::W64, |x, y| {
4078 comp64(x, y, |x, y| {
4079 if x.min == x.max && x.min == y.min && x.min == y.max {
4081 return Some(true);
4082 }
4083 if x.max < y.min || y.max < x.min {
4084 return Some(false);
4085 }
4086 None
4087 })
4088 })
4089 }
4090
4091 fn load<'a>(
4092 &mut self,
4093 context: &mut Self::Context<'a>,
4094 dst: Register,
4095 offset: i16,
4096 src: Register,
4097 width: DataWidth,
4098 ) -> Result<(), String> {
4099 bpf_log!(
4100 self,
4101 context,
4102 "ldx{} {}, [{}{}]",
4103 width.str(),
4104 display_register(dst),
4105 display_register(src),
4106 print_offset(offset)
4107 );
4108 let addr = self.reg(src)?;
4109 let loaded_type = self.load_memory(context, &addr, Field::new(offset, width))?;
4110 let mut next = self.next()?;
4111 next.set_reg(dst, loaded_type)?;
4112 context.states.push(next);
4113 Ok(())
4114 }
4115
4116 fn load64<'a>(
4117 &mut self,
4118 context: &mut Self::Context<'a>,
4119 dst: Register,
4120 src: u8,
4121 lower: u32,
4122 ) -> Result<(), String> {
4123 let Some(next_instruction) = context.code.get(self.pc + 1) else {
4124 return Err(format!("incomplete lddw"));
4125 };
4126 if next_instruction.src_reg() != 0 || next_instruction.dst_reg() != 0 {
4127 return Err(format!("invalid lddw"));
4128 }
4129
4130 let value = match src {
4131 0 => {
4132 let value = (lower as u64) | (((next_instruction.imm() as u32) as u64) << 32);
4133 bpf_log!(self, context, "lddw {}, 0x{:x}", display_register(dst), value);
4134 Type::from(value)
4135 }
4136 BPF_PSEUDO_MAP_IDX => {
4137 let map_index = lower;
4138 bpf_log!(
4139 self,
4140 context,
4141 "lddw {}, map_by_index({:x})",
4142 display_register(dst),
4143 map_index
4144 );
4145 context
4146 .calling_context
4147 .maps
4148 .get(usize::try_from(map_index).unwrap())
4149 .map(|schema| Type::ConstPtrToMap { id: map_index.into(), schema: *schema })
4150 .ok_or_else(|| format!("lddw with invalid map index: {}", map_index))?
4151 }
4152 BPF_PSEUDO_MAP_IDX_VALUE => {
4153 let map_index = lower;
4154 let offset = next_instruction.imm();
4155 bpf_log!(
4156 self,
4157 context,
4158 "lddw {}, map_value_by_index({:x})+{offset}",
4159 display_register(dst),
4160 map_index
4161 );
4162 let id = context.next_id();
4163 let map_schema = context
4164 .calling_context
4165 .maps
4166 .get(usize::try_from(map_index).unwrap())
4167 .ok_or_else(|| format!("lddw with invalid map index: {}", map_index))?;
4168
4169 if map_schema.map_type != bpf_map_type_BPF_MAP_TYPE_ARRAY {
4170 return Err(format!(
4171 "Invalid map type at index {map_index} for lddw. Expecting array."
4172 ));
4173 }
4174 if map_schema.max_entries == 0 {
4175 return Err(format!("Array has no entry."));
4176 }
4177
4178 Type::PtrToMemory {
4179 id: MemoryId::from(id),
4180 offset: offset.into(),
4181 buffer_size: map_schema.value_size.into(),
4182 }
4183 }
4184 _ => {
4185 return Err(format!("invalid lddw"));
4186 }
4187 };
4188
4189 let parent = Some(Arc::new(self.clone()));
4190 let mut next = self.jump_with_offset(1, parent)?;
4191 next.set_reg(dst, value.into())?;
4192
4193 context.states.push(next);
4194 Ok(())
4195 }
4196
4197 fn load_from_packet<'a>(
4198 &mut self,
4199 context: &mut Self::Context<'a>,
4200 dst: Register,
4201 src: Register,
4202 offset: i32,
4203 register_offset: Option<Register>,
4204 width: DataWidth,
4205 ) -> Result<(), String> {
4206 bpf_log!(
4207 self,
4208 context,
4209 "ldp{} {}{}",
4210 width.str(),
4211 register_offset.map(display_register).unwrap_or_else(Default::default),
4212 print_offset(offset)
4213 );
4214
4215 let src_type = self.reg(src)?;
4217 let src_is_packet = match &context.calling_context.packet_type {
4218 Some(packet_type) => src_type == *packet_type,
4219 None => false,
4220 };
4221 if !src_is_packet {
4222 return Err(format!("R{} is not a packet", src));
4223 }
4224
4225 if let Some(reg) = register_offset {
4226 let reg = self.reg(reg)?;
4227 if !reg.is_written_scalar() {
4228 return Err("access to unwritten offset".to_string());
4229 }
4230 }
4231 let mut next = self.next()?;
4233 next.set_reg(dst, Type::UNKNOWN_SCALAR)?;
4234 for i in 1..=5 {
4235 next.set_reg(i, Type::default())?;
4236 }
4237 context.states.push(next);
4238 if !self.reg(0)?.is_written_scalar() {
4240 return Err("register 0 is incorrect at exit time".to_string());
4241 }
4242 if !self.resources.is_empty() {
4243 return Err("some resources have not been released at exit time".to_string());
4244 }
4245 let this = self.clone();
4246 this.terminate(context)?;
4247 Ok(())
4249 }
4250
4251 fn store<'a>(
4252 &mut self,
4253 context: &mut Self::Context<'a>,
4254 dst: Register,
4255 offset: i16,
4256 src: Source,
4257 width: DataWidth,
4258 ) -> Result<(), String> {
4259 let value = match src {
4260 Source::Reg(r) => {
4261 bpf_log!(
4262 self,
4263 context,
4264 "stx{} [{}{}], {}",
4265 width.str(),
4266 display_register(dst),
4267 print_offset(offset),
4268 display_register(r),
4269 );
4270 self.reg(r)?
4271 }
4272 Source::Value(v) => {
4273 bpf_log!(
4274 self,
4275 context,
4276 "st{} [{}{}], 0x{:x}",
4277 width.str(),
4278 display_register(dst),
4279 print_offset(offset),
4280 v,
4281 );
4282 Type::from(v & Type::mask(width))
4283 }
4284 };
4285 let mut next = self.next()?;
4286 let addr = self.reg(dst)?;
4287 next.store_memory(context, &addr, Field::new(offset, width), value)?;
4288 context.states.push(next);
4289 Ok(())
4290 }
4291}
4292
4293fn alu32(
4294 x: ScalarValueData,
4295 y: ScalarValueData,
4296 op: impl FnOnce(U32ScalarValueData, U32ScalarValueData) -> U32ScalarValueData,
4297) -> ScalarValueData {
4298 op(U32ScalarValueData::from(x), U32ScalarValueData::from(y)).into()
4299}
4300
4301fn comp64(
4302 x: ScalarValueData,
4303 y: ScalarValueData,
4304 op: impl FnOnce(U64Range, U64Range) -> Option<bool>,
4305) -> Result<Option<bool>, ()> {
4306 if !x.is_fully_initialized() || !y.is_fully_initialized() {
4307 return Err(());
4308 }
4309 Ok(op(x.urange, y.urange))
4310}
4311
4312fn comp32(
4313 x: ScalarValueData,
4314 y: ScalarValueData,
4315 op: impl FnOnce(U32Range, U32Range) -> Option<bool>,
4316) -> Result<Option<bool>, ()> {
4317 let x = U32ScalarValueData::from(x);
4318 let y = U32ScalarValueData::from(y);
4319 if !x.is_fully_initialized() || !y.is_fully_initialized() {
4320 return Err(());
4321 }
4322 Ok(op(x.urange, y.urange))
4323}
4324
4325fn scomp64(
4326 x: ScalarValueData,
4327 y: ScalarValueData,
4328 op: impl FnOnce(i64, i64) -> bool,
4329) -> Result<Option<bool>, ()> {
4330 if !x.is_fully_initialized() || !y.is_fully_initialized() {
4331 return Err(());
4332 }
4333 if !x.is_known() || !y.is_known() {
4334 return Ok(None);
4335 }
4336 Ok(Some(op(x.value as i64, y.value as i64)))
4337}
4338
4339fn scomp32(
4340 x: ScalarValueData,
4341 y: ScalarValueData,
4342 op: impl FnOnce(i32, i32) -> bool,
4343) -> Result<Option<bool>, ()> {
4344 let x = U32ScalarValueData::from(x);
4345 let y = U32ScalarValueData::from(y);
4346 if !x.is_fully_initialized() || !y.is_fully_initialized() {
4347 return Err(());
4348 }
4349 if !x.is_known() || !y.is_known() {
4350 return Ok(None);
4351 }
4352 Ok(Some(op(x.value as i32, y.value as i32)))
4353}
4354
/// Renders a displacement for disassembly output: empty for zero, with an
/// explicit `+` sign for positive values, and the natural `-` for negative
/// ones.
fn print_offset<T: Into<i32>>(offset: T) -> String {
    match offset.into() {
        0 => String::new(),
        offset if offset > 0 => format!("+{offset}"),
        offset => format!("{offset}"),
    }
}
4365
4366fn run_on_stack_offset<F>(v: StackOffset, f: F) -> StackOffset
4367where
4368 F: FnOnce(ScalarValueData) -> ScalarValueData,
4369{
4370 StackOffset(f(v.reg()))
4371}
4372
4373fn error_and_log<T>(
4374 logger: &mut dyn VerifierLogger,
4375 msg: impl std::string::ToString,
4376) -> Result<T, EbpfError> {
4377 let msg = msg.to_string();
4378 logger.log(msg.as_bytes());
4379 return Err(EbpfError::ProgramVerifyError(msg));
4380}
4381
/// Combines two orderings into one consistent with both: identical orderings
/// combine to themselves, `Equal` is neutral, and opposed orderings have no
/// combination.
fn associate_orderings(o1: Ordering, o2: Ordering) -> Option<Ordering> {
    if o1 == o2 {
        Some(o1)
    } else if o2 == Ordering::Equal {
        Some(o1)
    } else if o1 == Ordering::Equal {
        Some(o2)
    } else {
        None
    }
}
4389
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::BTreeSet;
    use test_util::{assert_geq, assert_leq};

    // Checks the partial order on `Type`: distinct constants and alias
    // parameters are incomparable, any written value is below the unknown
    // scalar, and everything is below the unwritten (default) type.
    #[test]
    fn test_type_ordering() {
        let t0 = Type::from(0);
        let t1 = Type::from(1);
        let random = Type::AliasParameter { parameter_index: 8 };
        let unknown_written = Type::UNKNOWN_SCALAR;
        let unwritten = Type::default();

        assert_eq!(t0.partial_cmp(&t0), Some(Ordering::Equal));
        assert_eq!(t0.partial_cmp(&t1), None);
        assert_eq!(t0.partial_cmp(&random), None);
        assert_eq!(t0.partial_cmp(&unknown_written), Some(Ordering::Less));
        assert_eq!(t0.partial_cmp(&unwritten), Some(Ordering::Less));

        assert_eq!(t1.partial_cmp(&t0), None);
        assert_eq!(t1.partial_cmp(&t1), Some(Ordering::Equal));
        assert_eq!(t1.partial_cmp(&random), None);
        assert_eq!(t1.partial_cmp(&unknown_written), Some(Ordering::Less));
        assert_eq!(t1.partial_cmp(&unwritten), Some(Ordering::Less));

        assert_eq!(random.partial_cmp(&t0), None);
        assert_eq!(random.partial_cmp(&t1), None);
        assert_eq!(random.partial_cmp(&random), Some(Ordering::Equal));
        assert_eq!(random.partial_cmp(&unknown_written), None);
        assert_eq!(random.partial_cmp(&unwritten), Some(Ordering::Less));

        assert_eq!(unknown_written.partial_cmp(&t0), Some(Ordering::Greater));
        assert_eq!(unknown_written.partial_cmp(&t1), Some(Ordering::Greater));
        assert_eq!(unknown_written.partial_cmp(&random), None);
        assert_eq!(unknown_written.partial_cmp(&unknown_written), Some(Ordering::Equal));
        assert_eq!(unknown_written.partial_cmp(&unwritten), Some(Ordering::Less));

        assert_eq!(unwritten.partial_cmp(&t0), Some(Ordering::Greater));
        assert_eq!(unwritten.partial_cmp(&t1), Some(Ordering::Greater));
        assert_eq!(unwritten.partial_cmp(&random), Some(Ordering::Greater));
        assert_eq!(unwritten.partial_cmp(&unknown_written), Some(Ordering::Greater));
        assert_eq!(unwritten.partial_cmp(&unwritten), Some(Ordering::Equal));
    }

    // Checks the partial order on `Stack`: writing a slot makes a stack
    // strictly "less" than an empty one, and stacks with different written
    // slots are incomparable.
    #[test]
    fn test_stack_ordering() {
        let mut s1 = Stack::default();
        let mut s2 = Stack::default();

        assert_eq!(s1.partial_cmp(&s2), Some(Ordering::Equal));
        s1.set(0, 0.into());
        assert_eq!(s1.partial_cmp(&s2), Some(Ordering::Less));
        assert_eq!(s2.partial_cmp(&s1), Some(Ordering::Greater));
        s2.set(1, 1.into());
        assert_eq!(s1.partial_cmp(&s2), None);
        assert_eq!(s2.partial_cmp(&s1), None);
    }

    // Checks the partial order on `ComputationContext`, driven by the
    // per-map array bounds (pointwise comparison) and the pc (which must
    // match for contexts to be comparable at all).
    #[test]
    fn test_context_ordering() {
        let mut c1 = ComputationContext::default();
        let mut c2 = ComputationContext::default();

        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Equal));

        c1.array_bounds.insert(1.into(), 5);
        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Less));
        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Greater));

        c2.array_bounds.insert(1.into(), 7);
        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));

        c1.array_bounds.insert(2.into(), 9);
        assert_eq!(c1.partial_cmp(&c2), None);
        assert_eq!(c2.partial_cmp(&c1), None);

        c2.array_bounds.insert(2.into(), 9);
        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));

        c2.array_bounds.insert(3.into(), 12);
        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));

        // Contexts at different pcs are incomparable.
        c1.pc = 8;
        assert_eq!(c1.partial_cmp(&c2), None);
        assert_eq!(c2.partial_cmp(&c1), None);
    }

    // Writes scalars at stack offsets 8..16, 16..24, and 24..26, then checks
    // that data-pointer reads succeed exactly inside the contiguous written
    // region [8, 26), and that an overflowing length is rejected.
    #[test]
    fn test_stack_access() {
        let mut s = Stack::default();

        assert!(s.store(StackOffset(8.into()), Type::UNKNOWN_SCALAR, DataWidth::U64).is_ok());
        assert!(s.store(StackOffset(16.into()), Type::UNKNOWN_SCALAR, DataWidth::U64).is_ok());
        assert!(s.store(StackOffset(24.into()), Type::UNKNOWN_SCALAR, DataWidth::U16).is_ok());

        for offset in 0..32 {
            for end in (offset + 1)..32 {
                assert_eq!(
                    s.read_data_ptr(2, StackOffset(offset.into()), (end - offset) as u64).is_ok(),
                    offset >= 8 && end <= 26
                );
            }
        }

        // A length that would overflow past the end of the address space
        // must be rejected.
        assert!(s.read_data_ptr(2, StackOffset(12.into()), u64::MAX - 2).is_err());
    }

    // Exhaustively checks that `compute_range_for_bytes_swap` produces a
    // range that always contains the true result of replacing byte 1 of
    // `old` with byte 1 of `new`, for every consistent (min, max) bracket of
    // the two operands.
    #[test]
    fn test_compute_range_for_bytes_swap() {
        // Build a set of interesting values whose byte slices around index 1
        // take extreme values.
        let mut values = BTreeSet::<u64>::default();
        for v1 in &[0x00, 0x1, u64::MAX] {
            for v2 in &[0x00, 0x1, u64::MAX] {
                for v3 in &[0x00, 0x1, u64::MAX] {
                    values.insert(U64Range::assemble_slices((*v1, *v2, *v3), 1, 1));
                }
            }
        }
        // Reference semantics: replace byte 1 of `old` with byte 0 of `new`.
        let store = |old: u64, new: u64| (old & !0xff00) | ((new & 0xff) << 8);

        for old in &values {
            for new in &values {
                let s = store(*old, *new);
                for min_old in values.iter().filter(|v| *v <= old) {
                    for min_new in values.iter().filter(|v| *v <= new) {
                        for max_old in values.iter().filter(|v| *v >= old) {
                            for max_new in values.iter().filter(|v| *v >= new) {
                                let range = U64Range::compute_range_for_bytes_swap(
                                    U64Range::new(*min_old, *max_old),
                                    U64Range::new(*min_new, *max_new),
                                    1,
                                    0,
                                    1,
                                );
                                // The computed range must bracket the true
                                // result.
                                assert_leq!(range.min, s);
                                assert_geq!(range.max, s);
                            }
                        }
                    }
                }
            }
        }
    }

    // Checks that `Type::constraint` narrows scalar ranges for `jge`/`jgt`
    // against a constant: the unknown side gets a tightened lower bound and
    // the constant side stays exact.
    #[test]
    fn test_type_constraint() {
        let mut context = ComputationContext::default();
        let (new_lhs, new_rhs) = Type::constraint(
            &mut context,
            JumpType::Ge,
            JumpWidth::W64,
            Type::UNKNOWN_SCALAR,
            Type::from(10),
        )
        .unwrap();

        // lhs >= 10 narrows the unknown scalar to [10, u64::MAX].
        if let Type::ScalarValue(data1) = new_lhs {
            assert_eq!(data1.min(), 10);
            assert_eq!(data1.max(), u64::MAX);
        } else {
            panic!("Expected ScalarValue");
        }

        if let Type::ScalarValue(data2) = new_rhs {
            assert_eq!(data2.min(), 10);
            assert_eq!(data2.max(), 10);
        } else {
            panic!("Expected ScalarValue");
        }

        let (new_lhs, new_rhs) = Type::constraint(
            &mut context,
            JumpType::Gt,
            JumpWidth::W64,
            Type::UNKNOWN_SCALAR,
            Type::from(10),
        )
        .unwrap();

        // lhs > 10 narrows the unknown scalar to [11, u64::MAX].
        if let Type::ScalarValue(data1) = new_lhs {
            assert_eq!(data1.min(), 11);
            assert_eq!(data1.max(), u64::MAX);
        } else {
            panic!("Expected ScalarValue");
        }
    }
}