// ebpf/verifier.rs
1// Copyright 2024 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#![allow(unused_variables)]
6
7use crate::scalar_value::{ScalarValueData, U32Range, U32ScalarValueData, U64Range};
8use crate::visitor::{BpfVisitor, ProgramCounter, Register, Source};
9use crate::{
10    BPF_MAX_INSTS, BPF_PSEUDO_MAP_IDX, BPF_PSEUDO_MAP_IDX_VALUE, BPF_STACK_SIZE, DataWidth,
11    EbpfError, EbpfInstruction, GENERAL_REGISTER_COUNT, MapSchema, REGISTER_COUNT,
12};
13use byteorder::{BigEndian, ByteOrder, LittleEndian, NativeEndian};
14use fuchsia_sync::Mutex;
15use linux_uapi::bpf_map_type_BPF_MAP_TYPE_ARRAY;
16use std::cmp::Ordering;
17use std::collections::{BTreeMap, HashMap, HashSet};
18use std::sync::Arc;
19use zerocopy::IntoBytes;
20
/// `u32::MAX` widened to `u64`; used for 32-bit masking and range computations below.
const U32_MAX: u64 = u32::MAX as u64;
22
/// A trait to receive the log from the verifier.
pub trait VerifierLogger {
    /// Log a line. The line is always a correctly encoded ASCII string.
    fn log(&mut self, line: &[u8]);
}
28
/// A `VerifierLogger` that drops all its content.
pub struct NullVerifierLogger;
impl VerifierLogger for NullVerifierLogger {
    fn log(&mut self, line: &[u8]) {
        // The line is intentionally discarded; in debug builds, still enforce the
        // trait contract that every logged line is ASCII.
        debug_assert!(line.is_ascii());
    }
}
37
/// An identifier for a memory buffer accessible by an ebpf program. The identifiers are built as a
/// chain of unique identifiers so that a buffer can contain multiple pointers to the same type and
/// the verifier can distinguish between the different instances.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct MemoryId {
    /// The unique value for this link of the chain.
    id: u64,
    /// The rest of the chain, if any.
    parent: Option<Box<MemoryId>>,
}
46
impl MemoryId {
    /// The raw id of this link of the chain (the parent chain is not included).
    pub fn id(&self) -> u64 {
        self.id
    }
}
52
impl From<u64> for MemoryId {
    /// Builds a `MemoryId` with the given raw id and no parent chain.
    fn from(id: u64) -> Self {
        Self { id, parent: None }
    }
}
58
/// A counter used to generate new ids for parameters. The namespace is shared with the ids
/// generated for types while verifying an ebpf program, but it starts at u64::MAX / 2 and so is
/// guaranteed to never collide because the number of instructions of an ebpf program is bounded.
static BPF_TYPE_IDENTIFIER_COUNTER: std::sync::atomic::AtomicU64 =
    std::sync::atomic::AtomicU64::new(u64::MAX / 2);
64
65impl MemoryId {
66    pub const fn from_raw(id: u64) -> MemoryId {
67        Self { id, parent: None }
68    }
69
70    pub fn new() -> MemoryId {
71        Self {
72            id: BPF_TYPE_IDENTIFIER_COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
73            parent: None,
74        }
75    }
76
77    /// Build a new id such that `other` is prepended to the chain of parent of `self`.
78    fn prepended(&self, other: MemoryId) -> Self {
79        match &self.parent {
80            None => MemoryId { id: self.id, parent: Some(Box::new(other)) },
81            Some(parent) => {
82                MemoryId { id: self.id, parent: Some(Box::new(parent.prepended(other))) }
83            }
84        }
85    }
86
87    /// Returns whether the parent of `self` is `parent`
88    fn has_parent(&self, parent: &MemoryId) -> bool {
89        match &self.parent {
90            None => false,
91            Some(p) => p.as_ref() == parent,
92        }
93    }
94
95    /// Returns whether `self` and `other` represent the same type. This is true if both have the
96    /// same id and if they both have parent, parents must also match.
97    fn matches(&self, other: &MemoryId) -> bool {
98        if self.id != other.id {
99            return false;
100        };
101        match (&self.parent, &other.parent) {
102            (Some(p1), Some(p2)) => p1.matches(p2.as_ref()),
103            _ => true,
104        }
105    }
106}
107
/// The type of a field in a struct pointed by `Type::PtrToStruct`.
#[derive(Clone, Debug, PartialEq)]
pub enum FieldType {
    /// Read-only scalar value of `size` bytes.
    Scalar { size: usize },

    /// Mutable scalar value of `size` bytes.
    MutableScalar { size: usize },

    /// A pointer to the kernel memory. The full buffer is `buffer_size` bytes long.
    PtrToMemory { is_32_bit: bool, id: MemoryId, buffer_size: usize },

    /// A nullable pointer to the kernel memory. The full buffer is `buffer_size` bytes long.
    NullablePtrToMemory { is_32_bit: bool, id: MemoryId, buffer_size: usize },

    /// A pointer to the kernel memory. The full buffer size is determined by an instance of
    /// `PtrToEndArray` with the same `id`.
    PtrToArray { is_32_bit: bool, id: MemoryId },

    /// A pointer to the kernel memory that represents the first non accessible byte of a
    /// `PtrToArray` with the same `id`.
    PtrToEndArray { is_32_bit: bool, id: MemoryId },
}
131
/// Definition of a field in a `Type::PtrToStruct`.
#[derive(Clone, Debug, PartialEq)]
pub struct FieldDescriptor {
    /// The offset at which the field is located.
    pub offset: usize,

    /// The type of the pointed memory.
    // NOTE(review): the previous comment listed `PtrToArray`, `PtrToStruct`, `PtrToEndArray` and
    // `ScalarValue` as the supported types, which does not match the variants actually declared
    // on `FieldType` (Scalar, MutableScalar, PtrToMemory, NullablePtrToMemory, PtrToArray,
    // PtrToEndArray) — the list looked stale; confirm against the verifier implementation.
    pub field_type: FieldType,
}
142
143impl FieldDescriptor {
144    fn size(&self) -> usize {
145        match self.field_type {
146            FieldType::Scalar { size } | FieldType::MutableScalar { size } => size,
147            FieldType::PtrToMemory { is_32_bit, .. }
148            | FieldType::NullablePtrToMemory { is_32_bit, .. }
149            | FieldType::PtrToArray { is_32_bit, .. }
150            | FieldType::PtrToEndArray { is_32_bit, .. } => {
151                if is_32_bit {
152                    4
153                } else {
154                    8
155                }
156            }
157        }
158    }
159}
160
/// The offset and width of a field in a struct.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct Field {
    /// Signed byte offset of the access, relative to the pointer's current position.
    offset: i16,
    /// Width of the access.
    width: DataWidth,
}
167
impl Field {
    /// Builds a `Field` describing an access of `width` bytes at `offset`.
    fn new(offset: i16, width: DataWidth) -> Self {
        Self { offset, width }
    }
}
173
/// Defines field layout in a struct pointed by `Type::PtrToStruct`.
#[derive(Debug, PartialEq, Default)]
pub struct StructDescriptor {
    /// The list of fields.
    pub fields: Vec<FieldDescriptor>,
}
180
181impl StructDescriptor {
182    /// Return true if `self` is a subtype of `super_struct`.
183    fn is_subtype(&self, super_struct: &StructDescriptor) -> bool {
184        // Check that `super_struct.fields` is a subset of `fields`.
185        for super_field in super_struct.fields.iter() {
186            if self.fields.iter().find(|field| *field == super_field).is_none() {
187                return false;
188            }
189        }
190        true
191    }
192
193    /// Finds the field type for load/store at the specified location. None is returned if the
194    /// access is invalid and the program must be rejected.
195    fn find_field(&self, base_offset: ScalarValueData, field: Field) -> Option<&FieldDescriptor> {
196        let offset = base_offset + field.offset;
197        let field_desc = self.fields.iter().find(|f| {
198            f.offset <= offset.min() as usize && (offset.max() as usize) < f.offset + f.size()
199        })?;
200        let is_valid_load = match field_desc.field_type {
201            FieldType::Scalar { size } | FieldType::MutableScalar { size } => {
202                // For scalars check that the access is within the bounds of the field.
203                ((offset + field.width.bytes()).max() as usize) <= field_desc.offset + size
204            }
205            FieldType::PtrToMemory { is_32_bit, .. }
206            | FieldType::NullablePtrToMemory { is_32_bit, .. }
207            | FieldType::PtrToArray { is_32_bit, .. }
208            | FieldType::PtrToEndArray { is_32_bit, .. } => {
209                let expected_width = if is_32_bit { DataWidth::U32 } else { DataWidth::U64 };
210                // Pointer loads are expected to load the whole field.
211                offset.is_known()
212                    && offset.value as usize == field_desc.offset
213                    && field.width == expected_width
214            }
215        };
216
217        is_valid_load.then_some(field_desc)
218    }
219}
220
/// How the size of a memory buffer passed to a helper function is determined.
#[derive(Clone, Debug, PartialEq)]
pub enum MemoryParameterSize {
    /// The memory buffer has the given fixed size.
    Value(u64),
    /// The memory buffer size is given by the parameter at the given index.
    Reference { index: u8 },
}
228
229impl MemoryParameterSize {
230    fn size(&self, context: &ComputationContext) -> Result<u64, String> {
231        match self {
232            Self::Value(size) => Ok(*size),
233            Self::Reference { index } => {
234                let size_type = context.reg(index + 1)?;
235                match size_type {
236                    Type::ScalarValue(data) if data.is_known() => Ok(data.value),
237                    _ => Err("cannot know buffer size".to_string()),
238                }
239            }
240        }
241    }
242}
243
#[derive(Clone, Debug, PartialEq)]
pub enum Type {
    /// A number.
    ScalarValue(ScalarValueData),
    /// A pointer to a map object.
    ConstPtrToMap { id: u64, schema: MapSchema },
    /// A pointer into the stack.
    PtrToStack { offset: StackOffset },
    /// A pointer to the kernel memory. The full buffer is `buffer_size` bytes long. The pointer is
    /// situated at `offset` from the start of the buffer.
    PtrToMemory { id: MemoryId, offset: ScalarValueData, buffer_size: u64 },
    /// A pointer to a struct with the specified `StructDescriptor`.
    PtrToStruct { id: MemoryId, offset: ScalarValueData, descriptor: Arc<StructDescriptor> },
    /// A pointer to the kernel memory. The full buffer size is determined by an instance of
    /// `PtrToEndArray` with the same `id`. The pointer is situated at `offset` from the start of
    /// the buffer.
    PtrToArray { id: MemoryId, offset: ScalarValueData },
    /// A pointer to the kernel memory that represents the first non accessible byte of a
    /// `PtrToArray` with the same `id`.
    PtrToEndArray { id: MemoryId },
    /// A pointer that might be null and must be validated before being referenced.
    NullOr { id: MemoryId, inner: Box<Type> },
    /// An object that must be passed to a method with an associated `ReleaseParameter` before the
    /// end of the program.
    Releasable { id: MemoryId, inner: Box<Type> },
    /// A function parameter that must be a `ScalarValue` when called.
    ScalarValueParameter,
    /// A function parameter that must be a `ConstPtrToMap` when called.
    ConstPtrToMapParameter,
    /// A function parameter that must be a key of a map.
    MapKeyParameter {
        /// The index in the arguments list that contains a `ConstPtrToMap` for the map this key is
        /// associated with.
        map_ptr_index: u8,
    },
    /// A function parameter that must be a value of a map.
    MapValueParameter {
        /// The index in the arguments list that contains a `ConstPtrToMap` for the map this value
        /// is associated with.
        map_ptr_index: u8,
    },
    /// A function parameter that must be a pointer to memory.
    MemoryParameter {
        /// The index in the arguments list that contains a scalar value containing the size of the
        /// memory.
        size: MemoryParameterSize,
        /// Whether this memory is read by the function.
        input: bool,
        /// Whether this memory is written by the function.
        output: bool,
    },
    /// A function return value that is the same type as a parameter.
    AliasParameter {
        /// The index in the argument list of the parameter that has the type of this return value.
        parameter_index: u8,
    },
    /// A function return value that is either null, or the given type.
    NullOrParameter(Box<Type>),
    /// A function parameter that must be a pointer to memory with the given id.
    StructParameter { id: MemoryId },
    /// A function return value that must be passed to a method with an associated
    /// `ReleaseParameter` before the end of the program.
    ReleasableParameter { id: MemoryId, inner: Box<Type> },
    /// A function parameter that will release the value.
    ReleaseParameter { id: MemoryId },
    /// A function parameter that is not checked.
    AnyParameter,
}
312
313/// Defines a partial ordering on `Type` instances, capturing the notion of how "broad"
314/// a type is in terms of the set of potential values it represents.
315///
316/// The ordering is defined such that `t1 > t2` if a proof that an eBPF program terminates
317/// in a state where a register or memory location has type `t1` is also a proof that
318/// the program terminates in a state where that location has type `t2`.
319///
320/// In other words, a "broader" type represents a larger set of possible values, and
321/// proving termination with a broader type implies termination with any narrower type.
322///
323/// Examples:
324/// * `Type::ScalarValue(ScalarValueData({ unknown_mask: 0, .. }))` (a known scalar value) is less than
325///   `Type::ScalarValue(ScalarValueData({ unknown_mask: u64::MAX, .. }))` (an unknown scalar value).
326impl PartialOrd for Type {
327    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
328        // If the values are equals, return the known result.
329        if self == other {
330            return Some(Ordering::Equal);
331        }
332
333        // If one value is not initialized, the types are ordered.
334        if self == &Type::UNINITIALIZED {
335            return Some(Ordering::Greater);
336        }
337        if other == &Type::UNINITIALIZED {
338            return Some(Ordering::Less);
339        }
340
341        // Otherwise, only scalars are comparables.
342        match (self, other) {
343            (Self::ScalarValue(data1), Self::ScalarValue(data2)) => data1.partial_cmp(data2),
344            _ => None,
345        }
346    }
347}
348
impl From<ScalarValueData> for Type {
    /// Wraps scalar value data into a `Type::ScalarValue`.
    fn from(value: ScalarValueData) -> Self {
        Self::ScalarValue(value)
    }
}
354
impl From<u64> for Type {
    /// Builds the type of a fully known scalar constant.
    fn from(value: u64) -> Self {
        Self::ScalarValue(value.into())
    }
}
360
impl Default for Type {
    /// A new instance of `Type` where no bit has been written yet.
    fn default() -> Self {
        Self::UNINITIALIZED.clone()
    }
}
367
impl Type {
    /// An uninitialized value.
    pub const UNINITIALIZED: Self = Self::ScalarValue(ScalarValueData::UNINITIALIZED);

    /// A `Type` where the data is usable by userspace, but the value is not known by the verifier.
    pub const UNKNOWN_SCALAR: Self = Self::ScalarValue(ScalarValueData::UNKNOWN_WRITTEN);

    /// The mask associated with a data of size `width`.
    // The U64 case is special-cased because `1 << 64` would overflow.
    fn mask(width: DataWidth) -> u64 {
        if width == DataWidth::U64 { u64::MAX } else { (1 << width.bits()) - 1 }
    }

    /// Return true if `self` is a scalar with all bytes initialized.
    fn is_written_scalar(&self) -> bool {
        match self {
            Self::ScalarValue(data) if data.is_fully_initialized() => true,
            _ => false,
        }
    }

    /// Return true if `self` has all bytes initialized. Non-scalar types are always considered
    /// initialized.
    fn is_initialized(&self) -> bool {
        match self {
            Self::ScalarValue(data) => data.is_fully_initialized(),
            _ => true,
        }
    }

    /// Return true if `self` is a subtype of `super_type`.
    pub fn is_subtype(&self, super_type: &Type) -> bool {
        match (self, super_type) {
            // Anything can be used in place of an uninitialized value.
            (_, Self::ScalarValue(data)) if data.is_uninitialized() => true,

            // A struct pointer is a subtype of another when the ids match, its offset is not
            // larger, and its descriptor declares at least the fields of the super descriptor.
            (
                Self::PtrToStruct { id: id1, offset: offset1, descriptor: descriptor1 },
                Self::PtrToStruct { id: id2, offset: offset2, descriptor: descriptor2 },
            ) => id1 == id2 && offset1 <= offset2 && descriptor1.is_subtype(descriptor2),

            // Every type is a subtype of itself.
            (self_type, super_type) if self_type == super_type => true,

            _ => false,
        }
    }

    /// Return true if `self` is guaranteed to be non-zero. Nullable pointers are never
    /// guaranteed non-zero; non-nullable pointers always are.
    pub fn is_non_zero(&self) -> bool {
        match self {
            Self::ScalarValue(d) => d.min() > 0,
            Self::NullOr { .. } => false,
            _ => true,
        }
    }

    /// Unwraps a `Releasable` type into its inner type, failing if the resource has already
    /// been released in `context`. Any other type is returned unchanged.
    fn inner(&self, context: &ComputationContext) -> Result<&Type, String> {
        match self {
            Self::Releasable { id, inner } => {
                if context.resources.contains(id) {
                    Ok(&inner)
                } else {
                    Err("Access to released resource".to_string())
                }
            }
            _ => Ok(self),
        }
    }

    /// Constraints `type1` and `type2` for a conditional jump with the specified `jump_type` and
    /// `jump_width`, returning the narrowed types that hold on the branch where the comparison
    /// is true. As side effects on `context`, this may mark `NullOr` ids as null/non-null and
    /// widen known array bounds when a pointer is compared against the matching end-of-array
    /// pointer.
    fn constraint(
        context: &mut ComputationContext,
        jump_type: JumpType,
        jump_width: JumpWidth,
        type1: Self,
        type2: Self,
    ) -> Result<(Self, Self), String> {
        let result = match (jump_width, jump_type, type1.inner(context)?, type2.inner(context)?) {
            // 64-bit equality of two scalars: both sides now carry the union of the known bits
            // and the intersection of the value ranges.
            (JumpWidth::W64, JumpType::Eq, Type::ScalarValue(data1), Type::ScalarValue(data2))
                if data1.is_fully_initialized() && data2.is_fully_initialized() =>
            {
                let umin = std::cmp::max(data1.min(), data2.min());
                let umax = std::cmp::min(data1.max(), data2.max());
                let v = Type::ScalarValue(ScalarValueData::new(
                    data1.value | data2.value,
                    data1.unknown_mask & data2.unknown_mask,
                    0,
                    U64Range::new(umin, umax),
                ));
                (v.clone(), v)
            }
            // 32-bit equality: only the low 32 bits are constrained; ranges are intersected only
            // when both sides are known to fit in 32 bits.
            (JumpWidth::W32, JumpType::Eq, Type::ScalarValue(data1), Type::ScalarValue(data2))
                if data1.is_fully_initialized() && data2.is_fully_initialized() =>
            {
                let maybe_umin = if data1.min() <= U32_MAX && data2.min() <= U32_MAX {
                    Some(std::cmp::max(data1.min(), data2.min()))
                } else {
                    None
                };
                let maybe_umax = if data1.max() <= U32_MAX && data2.max() <= U32_MAX {
                    Some(std::cmp::min(data1.max(), data2.max()))
                } else {
                    None
                };

                let urange1 = U64Range::new(
                    maybe_umin.unwrap_or_else(|| data1.min()),
                    maybe_umax.unwrap_or_else(|| data1.max()),
                );
                let v1 = Type::ScalarValue(ScalarValueData::new(
                    data1.value | (data2.value & U32_MAX),
                    data1.unknown_mask & (data2.unknown_mask | (U32_MAX << 32)),
                    0,
                    urange1,
                ));

                let urange2 = U64Range::new(
                    maybe_umin.unwrap_or_else(|| data2.min()),
                    maybe_umax.unwrap_or_else(|| data2.max()),
                );
                let v2 = Type::ScalarValue(ScalarValueData::new(
                    data2.value | (data1.value & U32_MAX),
                    data2.unknown_mask & (data1.unknown_mask | (U32_MAX << 32)),
                    0,
                    urange2,
                ));
                (v1, v2)
            }
            // A nullable pointer compared equal to zero: on this branch it is null.
            (JumpWidth::W64, JumpType::Eq, Type::ScalarValue(data), Type::NullOr { id, .. })
            | (JumpWidth::W64, JumpType::Eq, Type::NullOr { id, .. }, Type::ScalarValue(data))
                if data.is_zero() =>
            {
                context.set_null(id, true);
                let zero = Type::from(0);
                (zero.clone(), zero)
            }
            // A nullable pointer strictly compared against zero: on this branch it is non-null,
            // so unwrap it and register any resource it carries.
            (JumpWidth::W64, jump_type, Type::NullOr { id, inner }, Type::ScalarValue(data))
                if jump_type.is_strict() && data.is_zero() =>
            {
                context.set_null(id, false);
                let inner = *inner.clone();
                inner.register_resource(context);
                (inner, type2)
            }
            (JumpWidth::W64, jump_type, Type::ScalarValue(data), Type::NullOr { id, inner })
                if jump_type.is_strict() && data.is_zero() =>
            {
                context.set_null(id, false);
                let inner = *inner.clone();
                inner.register_resource(context);
                (type1, inner)
            }

            // Strict unsigned comparison of two scalars: tighten the max of the smaller side and
            // the min of the larger side.
            // NOTE(review): the debug_asserts assume the branch is feasible given the current
            // ranges (callers presumably prune infeasible branches) — confirm against callers.
            (JumpWidth::W64, JumpType::Lt, Type::ScalarValue(data1), Type::ScalarValue(data2))
            | (JumpWidth::W64, JumpType::Gt, Type::ScalarValue(data2), Type::ScalarValue(data1)) => {
                debug_assert!(data1.min() < u64::MAX);
                debug_assert!(data2.max() > 0);
                let new_max1 = std::cmp::min(data1.max(), data2.max() - 1);
                debug_assert!(data1.min() <= new_max1);
                let new_min2 = std::cmp::max(data2.min(), data1.min() + 1);
                debug_assert!(new_min2 <= data2.max());
                let new_range1 = U64Range::new(data1.min(), new_max1);
                let new_range2 = U64Range::new(new_min2, data2.max());
                (data1.update_range(new_range1).into(), data2.update_range(new_range2).into())
            }

            // Non-strict unsigned comparison: same as above, without the off-by-one tightening.
            (JumpWidth::W64, JumpType::Le, Type::ScalarValue(data1), Type::ScalarValue(data2))
            | (JumpWidth::W64, JumpType::Ge, Type::ScalarValue(data2), Type::ScalarValue(data1)) => {
                let new_max1 = std::cmp::min(data1.max(), data2.max());
                debug_assert!(data1.min() <= new_max1);
                let new_min2 = std::cmp::max(data2.min(), data1.min());
                debug_assert!(new_min2 <= data2.max());
                let new_range1 = U64Range::new(data1.min(), new_max1);
                let new_range2 = U64Range::new(new_min2, data2.max());
                (data1.update_range(new_range1).into(), data2.update_range(new_range2).into())
            }

            // `ptr <= end` (or equivalent): everything strictly before `offset` is accessible.
            (
                JumpWidth::W64,
                JumpType::Eq,
                Type::PtrToArray { id: id1, offset },
                Type::PtrToEndArray { id: id2 },
            )
            | (
                JumpWidth::W64,
                JumpType::Le,
                Type::PtrToArray { id: id1, offset },
                Type::PtrToEndArray { id: id2 },
            )
            | (
                JumpWidth::W64,
                JumpType::Ge,
                Type::PtrToEndArray { id: id1 },
                Type::PtrToArray { id: id2, offset },
            ) if id1 == id2 => {
                context.update_array_bounds(id1.clone(), *offset);
                (type1, type2)
            }
            // `ptr < end` (or equivalent): the byte at `offset` itself is also accessible.
            (
                JumpWidth::W64,
                JumpType::Lt,
                Type::PtrToArray { id: id1, offset },
                Type::PtrToEndArray { id: id2 },
            )
            | (
                JumpWidth::W64,
                JumpType::Gt,
                Type::PtrToEndArray { id: id1 },
                Type::PtrToArray { id: id2, offset },
            ) if id1 == id2 => {
                context.update_array_bounds(id1.clone(), *offset + 1);
                (type1, type2)
            }
            // Any other 64-bit equality: both sides are known to be the same value.
            (JumpWidth::W64, JumpType::Eq, _, _) => (type1.clone(), type1),
            // No information can be extracted; keep the types unchanged.
            _ => (type1, type2),
        };
        Ok(result)
    }

    /// Checks that `self` (the actual argument at position `index`) is acceptable for
    /// `parameter_type` (the declared parameter). May record stack reads/writes and release
    /// resources on `next`, the context for after the call.
    fn match_parameter_type(
        &self,
        context: &ComputationContext,
        parameter_type: &Type,
        index: usize,
        next: &mut ComputationContext,
    ) -> Result<(), String> {
        match (parameter_type, self) {
            // A nullable parameter accepts a known zero...
            (Type::NullOrParameter(t), Type::ScalarValue(data))
                if data.is_known() && data.value == 0 =>
            {
                Ok(())
            }
            // ...or anything accepted by the wrapped parameter type.
            (Type::NullOrParameter(t), _) => self.match_parameter_type(context, t, index, next),
            (Type::ScalarValueParameter, Type::ScalarValue(data))
                if data.is_fully_initialized() =>
            {
                Ok(())
            }
            (Type::ConstPtrToMapParameter, Type::ConstPtrToMap { .. }) => Ok(()),
            // Map keys/values may live in kernel memory or on the stack; in both cases the
            // whole key/value must be readable.
            (
                Type::MapKeyParameter { map_ptr_index },
                Type::PtrToMemory { offset, buffer_size, .. },
            ) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.check_memory_access(*offset, *buffer_size, 0, schema.key_size as usize)
            }
            (Type::MapKeyParameter { map_ptr_index }, Type::PtrToStack { offset }) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.stack.read_data_ptr(context.pc, *offset, schema.key_size as u64)
            }
            (
                Type::MapValueParameter { map_ptr_index },
                Type::PtrToMemory { offset, buffer_size, .. },
            ) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.check_memory_access(*offset, *buffer_size, 0, schema.value_size as usize)
            }
            (Type::MapValueParameter { map_ptr_index }, Type::PtrToStack { offset }) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.stack.read_data_ptr(context.pc, *offset, schema.value_size as u64)
            }
            // A memory parameter backed by kernel memory: the buffer must have at least
            // `expected_size` bytes left past the largest possible offset.
            (Type::MemoryParameter { size, .. }, Type::PtrToMemory { offset, buffer_size, .. }) => {
                let expected_size = size.size(context)?;
                let offset_max = offset.max();
                if offset_max > *buffer_size {
                    return Err("out of bound read".to_string());
                }
                let size_left = *buffer_size - offset_max;
                if expected_size > size_left {
                    return Err("out of bound read".to_string());
                }
                Ok(())
            }

            // A memory parameter backed by the stack: mark the range written in the post-call
            // context and/or require it readable in the current one.
            (Type::MemoryParameter { size, input, output }, Type::PtrToStack { offset }) => {
                let size = size.size(context)?;
                let buffer_end = offset.add(size);
                if !buffer_end.is_valid() {
                    Err("out of bound access".to_string())
                } else {
                    if *output {
                        next.stack.write_data_ptr(context.pc, *offset, size)?;
                    }
                    if *input {
                        context.stack.read_data_ptr(context.pc, *offset, size)?;
                    }
                    Ok(())
                }
            }
            // Struct parameters require a matching id and a pointer at the start of the buffer.
            (
                Type::StructParameter { id: id1 },
                Type::PtrToMemory { id: id2, offset, .. }
                | Type::PtrToStruct { id: id2, offset, .. },
            ) if offset.is_zero() && id1.matches(id2) => Ok(()),
            // A releasable parameter: the resource must still be live, and the inner types must
            // themselves match.
            (
                Type::ReleasableParameter { id: id1, inner: inner1 },
                Type::Releasable { id: id2, inner: inner2 },
            ) if id2.has_parent(id1) => {
                if next.resources.contains(id2) {
                    inner2.match_parameter_type(context, inner1, index, next)
                } else {
                    Err(format!("Resource already released for index {index}"))
                }
            }
            // A release parameter consumes the resource: remove it from the post-call context.
            (Type::ReleaseParameter { id: id1 }, Type::Releasable { id: id2, .. })
                if id2.has_parent(id1) =>
            {
                if next.resources.remove(id2) {
                    Ok(())
                } else {
                    Err(format!("{id2:?} Resource already released for index {index}"))
                }
            }
            // Any other parameter kind sees through a `Releasable` wrapper.
            (_, Type::Releasable { inner, .. }) => {
                inner.match_parameter_type(context, parameter_type, index, next)
            }
            (Type::AnyParameter, _) => Ok(()),

            _ => Err(format!("incorrect parameter for index {index}")),
        }
    }

    /// If this `Type` is an instance of NullOr with the given `null_id`, replace it with either
    /// 0 or the subtype depending on `is_null`.
    fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
        match self {
            Type::NullOr { id, inner } if id == null_id => {
                if is_null {
                    *self = Type::from(0);
                } else {
                    *self = *inner.clone();
                }
            }
            _ => {}
        }
    }

    /// If this `Type` is `Releasable`, record its id as a live resource in `context`.
    fn register_resource(&self, context: &mut ComputationContext) {
        match self {
            Type::Releasable { id, .. } => {
                context.resources.insert(id.clone());
            }
            _ => {}
        }
    }

    /// Partially Compares two iterators of comparable items.
    ///
    /// This function iterates through both input iterators simultaneously and compares the corresponding elements.
    /// The comparison continues until:
    /// 1. Both iterators are exhausted and all elements were considered equal, in which case it returns `Some(Ordering::Equal)`.
    /// 2. All pairs of corresponding elements that are not equal have the same ordering (`Ordering::Less` or `Ordering::Greater`), in which case it returns `Some(Ordering)` reflecting that consistent ordering.
    /// 3. One iterator is exhausted before the other, or any comparison between elements yields `None`, or not all non-equal pairs have the same ordering, in which case it returns `None`.
    fn compare_list<'a>(
        mut l1: impl Iterator<Item = &'a Self>,
        mut l2: impl Iterator<Item = &'a Self>,
    ) -> Option<Ordering> {
        let mut result = Ordering::Equal;
        loop {
            match (l1.next(), l2.next()) {
                (None, None) => return Some(result),
                (_, None) | (None, _) => return None,
                (Some(v1), Some(v2)) => {
                    result = associate_orderings(result, v1.partial_cmp(v2)?)?;
                }
            }
        }
    }
}
737
/// The signature of an external helper function callable from an ebpf program.
#[derive(Clone, Debug)]
pub struct FunctionSignature {
    /// The declared parameter types, checked against the actual argument registers at the call.
    pub args: Vec<Type>,
    /// The type of the value left in R0 after the call.
    pub return_value: Type,
    /// Whether calling this function invalidates previously proven array bounds.
    pub invalidate_array_bounds: bool,
}
744
/// The environment an ebpf program is verified against: available maps, helper functions,
/// and the types of the program's own arguments.
#[derive(Debug, Default)]
pub struct CallingContext {
    /// List of map schemas of the associated maps. The maps can be accessed using the LDDW
    /// instruction with `src_reg=BPF_PSEUDO_MAP_IDX`.
    pub maps: Vec<MapSchema>,
    /// The registered external functions, keyed by helper id.
    pub helpers: HashMap<u32, FunctionSignature>,
    /// The args of the program.
    pub args: Vec<Type>,
    /// Packet type. Normally it should be either `None` or `args[0]`.
    pub packet_type: Option<Type>,
}
757
758impl CallingContext {
759    pub fn register_map(&mut self, schema: MapSchema) -> usize {
760        let index = self.maps.len();
761        self.maps.push(schema);
762        index
763    }
764    pub fn set_helpers(&mut self, helpers: HashMap<u32, FunctionSignature>) {
765        self.helpers = helpers;
766    }
767    pub fn set_args(&mut self, args: &[Type]) {
768        debug_assert!(args.len() <= 5);
769        self.args = args.to_vec();
770    }
771    pub fn set_packet_type(&mut self, packet_type: Type) {
772        self.packet_type = Some(packet_type);
773    }
774}
775
/// Describes an instruction that accesses a field of a struct. These accesses are recorded
/// during verification and the instructions are remapped when the program is linked.
#[derive(Debug, PartialEq, Clone)]
pub struct StructAccess {
    /// Program counter of the accessing instruction.
    pub pc: ProgramCounter,

    /// Memory Id of the struct being accessed.
    pub memory_id: MemoryId,

    /// Offset of the field being loaded.
    pub field_offset: usize,

    /// Indicates that this is a 32-bit pointer load. These instructions must be remapped.
    pub is_32_bit_ptr_load: bool,
}
789
/// An ebpf program that was successfully verified. It still has to be linked before it can be
/// executed (see `struct_access_instructions`).
#[derive(Debug, Clone)]
pub struct VerifiedEbpfProgram {
    /// The verified instructions.
    pub(crate) code: Vec<EbpfInstruction>,
    /// The types of the program arguments the program was verified against.
    pub(crate) args: Vec<Type>,
    /// Instructions that access struct fields; they must be remapped during linking.
    pub(crate) struct_access_instructions: Vec<StructAccess>,
    /// The schemas of the maps used by the program.
    pub(crate) maps: Vec<MapSchema>,
}
797
798impl VerifiedEbpfProgram {
799    // Convert the program to raw code. Can be used only when the program doesn't access any
800    // structs and maps.
801    pub fn to_code(self) -> Vec<EbpfInstruction> {
802        debug_assert!(self.struct_access_instructions.is_empty());
803        debug_assert!(self.maps.is_empty());
804        self.code
805    }
806
807    pub fn code(&self) -> &[EbpfInstruction] {
808        &self.code
809    }
810
811    pub fn struct_access_instructions(&self) -> &[StructAccess] {
812        &self.struct_access_instructions
813    }
814
815    pub fn from_verified_code(
816        code: Vec<EbpfInstruction>,
817        args: Vec<Type>,
818        struct_access_instructions: Vec<StructAccess>,
819        maps: Vec<MapSchema>,
820    ) -> Self {
821        Self { code, args, struct_access_instructions, maps }
822    }
823
824    pub fn maps(&self) -> &[MapSchema] {
825        &self.maps
826    }
827}
828
829/// Verify the given code depending on the type of the parameters and the registered external
830/// functions. Returned `VerifiedEbpfProgram` should be linked in order to execute it.
831pub fn verify_program(
832    code: Vec<EbpfInstruction>,
833    calling_context: CallingContext,
834    logger: &mut dyn VerifierLogger,
835) -> Result<VerifiedEbpfProgram, EbpfError> {
836    if code.len() > BPF_MAX_INSTS {
837        return error_and_log(logger, "ebpf program too long");
838    }
839
840    let mut context = ComputationContext::default();
841    for (i, t) in calling_context.args.iter().enumerate() {
842        // The parameter registers are r1 to r5.
843        context.set_reg((i + 1) as u8, t.clone()).map_err(EbpfError::ProgramVerifyError)?;
844    }
845    let states = vec![context];
846    let mut verification_context = VerificationContext {
847        calling_context,
848        logger,
849        states,
850        code: &code,
851        counter: 0,
852        iteration: 0,
853        terminating_contexts: Default::default(),
854        struct_access_instructions: Default::default(),
855    };
856    while let Some(mut context) = verification_context.states.pop() {
857        if let Some(terminating_contexts) =
858            verification_context.terminating_contexts.get(&context.pc)
859        {
860            // Check whether there exist a context that terminate and prove that this context does
861            // also terminate.
862            if let Some(ending_context) =
863                terminating_contexts.iter().find(|c| c.computation_context >= context)
864            {
865                // One such context has been found, this proves the current context terminates.
866                // If the context has a parent, register the data dependencies and try to terminate
867                // it.
868                if let Some(parent) = context.parent.take() {
869                    parent.dependencies.lock().push(ending_context.dependencies.clone());
870                    if let Some(parent) = Arc::into_inner(parent) {
871                        parent
872                            .terminate(&mut verification_context)
873                            .map_err(EbpfError::ProgramVerifyError)?;
874                    }
875                }
876                continue;
877            }
878        }
879        if verification_context.iteration > 10 * BPF_MAX_INSTS {
880            return error_and_log(verification_context.logger, "bpf byte code does not terminate");
881        }
882        if context.pc >= code.len() {
883            return error_and_log(verification_context.logger, "pc out of bounds");
884        }
885        let visit_result = context.visit(&mut verification_context, code[context.pc]);
886        match visit_result {
887            Err(message) => {
888                let message = format!("at PC {}: {}", context.pc, message);
889                return error_and_log(verification_context.logger, message);
890            }
891            _ => {}
892        }
893        verification_context.iteration += 1;
894    }
895
896    let struct_access_instructions =
897        verification_context.struct_access_instructions.into_values().collect::<Vec<_>>();
898    let CallingContext { maps, args, .. } = verification_context.calling_context;
899    Ok(VerifiedEbpfProgram { code, struct_access_instructions, maps, args })
900}
901
/// The global, mutable state of the verifier while it analyzes one program.
struct VerificationContext<'a> {
    /// The type information for the program arguments and the registered functions.
    calling_context: CallingContext,
    /// The logger to use.
    logger: &'a mut dyn VerifierLogger,
    /// The `ComputationContext`s yet to be validated.
    states: Vec<ComputationContext>,
    /// The program being analyzed.
    code: &'a [EbpfInstruction],
    /// A counter used to generate unique ids for memory buffers and maps.
    counter: u64,
    /// The current iteration of the verifier. Used to ensure termination by limiting the number
    /// of iterations before bailing out.
    iteration: usize,
    /// Keeps track of the contexts that terminate at a given pc. The contexts in each list are
    /// all incomparable, as each time a bigger context is computed, the smaller ones are removed
    /// from the list.
    terminating_contexts: BTreeMap<ProgramCounter, Vec<TerminatingContext>>,
    /// The current set of struct access instructions that will need to be updated when the
    /// program is linked. This is also used to ensure that a given instruction always loads the
    /// same field. If this is not the case, the verifier will reject the program.
    struct_access_instructions: HashMap<ProgramCounter, StructAccess>,
}
925
926impl<'a> VerificationContext<'a> {
927    fn next_id(&mut self) -> u64 {
928        let id = self.counter;
929        self.counter += 1;
930        id
931    }
932
933    /// Register an instruction that loads or stores a struct field. These instructions will need
934    /// to updated later when the program is linked.
935    fn register_struct_access(&mut self, struct_access: StructAccess) -> Result<(), String> {
936        match self.struct_access_instructions.entry(struct_access.pc) {
937            std::collections::hash_map::Entry::Vacant(entry) => {
938                entry.insert(struct_access);
939            }
940            std::collections::hash_map::Entry::Occupied(entry) => {
941                if *entry.get() != struct_access {
942                    return Err("Inconsistent struct field access".to_string());
943                }
944            }
945        }
946        Ok(())
947    }
948}
949
/// Size in bytes of one tracked stack element (the stack is modeled as an array of u64).
const STACK_ELEMENT_SIZE: usize = std::mem::size_of::<u64>();
/// Number of u64 elements in the ebpf stack.
const STACK_MAX_INDEX: usize = BPF_STACK_SIZE / STACK_ELEMENT_SIZE;
952
/// An offset inside the stack. Defaults to `BPF_STACK_SIZE`, the initial value of the stack
/// pointer; programs move it downward from there.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct StackOffset(ScalarValueData);

impl Default for StackOffset {
    fn default() -> Self {
        Self(BPF_STACK_SIZE.into())
    }
}
963
964impl StackOffset {
965    /// Whether the current offset is valid.
966    fn is_valid(&self) -> bool {
967        self.0.is_known() && self.0.value <= (BPF_STACK_SIZE as u64)
968    }
969
970    /// The value of the register.
971    fn reg(&self) -> ScalarValueData {
972        self.0
973    }
974
975    /// The index into the stack array this offset points to. Can be called only if `is_valid()`
976    /// is true.
977    fn array_index(&self) -> usize {
978        debug_assert!(self.is_valid());
979        usize::try_from(self.0.value).unwrap() / STACK_ELEMENT_SIZE
980    }
981
982    /// The offset inside the aligned u64 in the stack.
983    fn sub_index(&self) -> usize {
984        debug_assert!(self.is_valid());
985        usize::try_from(self.0.value).unwrap() % STACK_ELEMENT_SIZE
986    }
987
988    fn add<T: Into<ScalarValueData>>(self, rhs: T) -> Self {
989        Self(self.0 + rhs)
990    }
991}
992
/// The state of the stack, tracked as a sparse map from element index to the type stored
/// there. Entries that were never written are absent and read back as uninitialized (see
/// `get`/`set`).
#[derive(Clone, Debug, Default, PartialEq)]
struct Stack {
    // Maps the index of a u64-aligned stack element to the type currently stored in it.
    data: HashMap<usize, Type>,
}
998
999impl Stack {
1000    /// Replace all instances of the NullOr type with the given `null_id` to either 0 or the
1001    /// subtype depending on `is_null`
1002    fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
1003        for (_, t) in self.data.iter_mut() {
1004            t.set_null(null_id, is_null);
1005        }
1006    }
1007
1008    fn get(&self, index: usize) -> &Type {
1009        self.data.get(&index).unwrap_or(&Type::UNINITIALIZED)
1010    }
1011
1012    fn set(&mut self, index: usize, t: Type) {
1013        if t == Type::UNINITIALIZED {
1014            self.data.remove(&index);
1015        } else {
1016            self.data.insert(index, t);
1017        }
1018    }
1019
1020    fn extract_sub_value(value: u64, offset: usize, byte_count: usize) -> u64 {
1021        NativeEndian::read_uint(&value.as_bytes()[offset..], byte_count)
1022    }
1023
1024    fn insert_sub_value(mut original: u64, value: u64, width: DataWidth, offset: usize) -> u64 {
1025        let byte_count = width.bytes();
1026        let original_buf = original.as_mut_bytes();
1027        let value_buf = value.as_bytes();
1028        original_buf[offset..(byte_count + offset)].copy_from_slice(&value_buf[..byte_count]);
1029        original
1030    }
1031
1032    fn write_data_ptr(
1033        &mut self,
1034        pc: ProgramCounter,
1035        mut offset: StackOffset,
1036        bytes: u64,
1037    ) -> Result<(), String> {
1038        for i in 0..bytes {
1039            self.store(offset, Type::UNKNOWN_SCALAR, DataWidth::U8)?;
1040            offset = offset.add(1);
1041        }
1042        Ok(())
1043    }
1044
1045    fn read_data_ptr(
1046        &self,
1047        pc: ProgramCounter,
1048        offset: StackOffset,
1049        bytes: u64,
1050    ) -> Result<(), String> {
1051        let read_element =
1052            |index: usize, start_offset: usize, end_offset: usize| -> Result<(), String> {
1053                match self.get(index) {
1054                    Type::ScalarValue(data) => {
1055                        debug_assert!(end_offset > start_offset);
1056                        let unwritten_bits = Self::extract_sub_value(
1057                            data.unwritten_mask,
1058                            start_offset,
1059                            end_offset - start_offset,
1060                        );
1061                        if unwritten_bits == 0 {
1062                            Ok(())
1063                        } else {
1064                            Err("reading unwritten value from the stack".to_string())
1065                        }
1066                    }
1067                    _ => Err("invalid read from the stack".to_string()),
1068                }
1069            };
1070        if bytes == 0 {
1071            return Ok(());
1072        }
1073
1074        if bytes as usize > BPF_STACK_SIZE {
1075            return Err("stack overflow".to_string());
1076        }
1077
1078        if !offset.is_valid() {
1079            return Err("invalid stack offset".to_string());
1080        }
1081
1082        let end_offset = offset.add(bytes);
1083        if !end_offset.is_valid() {
1084            return Err("stack overflow".to_string())?;
1085        }
1086
1087        // Handle the case where all the data is contained in a single element excluding the last
1088        // byte (the case when the read ends at an element edge, i.e. `end_offset.sub_index()==0`,
1089        // is covered by the default path below).
1090        if offset.array_index() == end_offset.array_index() {
1091            return read_element(offset.array_index(), offset.sub_index(), end_offset.sub_index());
1092        }
1093
1094        // Handle the first element, that might be partial.
1095        read_element(offset.array_index(), offset.sub_index(), STACK_ELEMENT_SIZE)?;
1096
1097        // Handle the last element, that might be partial.
1098        if end_offset.sub_index() != 0 {
1099            read_element(end_offset.array_index(), 0, end_offset.sub_index())?;
1100        }
1101
1102        // Handle the any full type between beginning and end.
1103        for i in (offset.array_index() + 1)..end_offset.array_index() {
1104            read_element(i, 0, STACK_ELEMENT_SIZE)?;
1105        }
1106
1107        Ok(())
1108    }
1109
1110    fn store(&mut self, offset: StackOffset, value: Type, width: DataWidth) -> Result<(), String> {
1111        if !offset.is_valid() {
1112            return Err("out of bounds store".to_string());
1113        }
1114        if offset.sub_index() % width.bytes() != 0 {
1115            return Err("misaligned access".to_string());
1116        }
1117
1118        let index = offset.array_index();
1119        if width == DataWidth::U64 {
1120            self.set(index, value);
1121        } else {
1122            match value {
1123                Type::ScalarValue(data) => {
1124                    let old_data = match self.get(index) {
1125                        Type::ScalarValue(data) => *data,
1126                        _ => {
1127                            // The value in the stack is not a scalar. Let consider it an scalar
1128                            // value with no written bits.
1129                            ScalarValueData::UNINITIALIZED
1130                        }
1131                    };
1132                    let sub_index = offset.sub_index();
1133                    let value =
1134                        Self::insert_sub_value(old_data.value, data.value, width, sub_index);
1135                    let unknown_mask = Self::insert_sub_value(
1136                        old_data.unknown_mask,
1137                        data.unknown_mask,
1138                        width,
1139                        sub_index,
1140                    );
1141                    let unwritten_mask = Self::insert_sub_value(
1142                        old_data.unwritten_mask,
1143                        data.unwritten_mask,
1144                        width,
1145                        sub_index,
1146                    );
1147                    let urange = U64Range::compute_range_for_bytes_swap(
1148                        old_data.urange,
1149                        data.urange,
1150                        sub_index,
1151                        0,
1152                        width.bytes(),
1153                    );
1154                    self.set(
1155                        index,
1156                        Type::ScalarValue(ScalarValueData::new(
1157                            value,
1158                            unknown_mask,
1159                            unwritten_mask,
1160                            urange,
1161                        )),
1162                    );
1163                }
1164                _ => {
1165                    return Err("cannot store part of a non scalar value on the stack".to_string());
1166                }
1167            }
1168        }
1169        Ok(())
1170    }
1171
1172    fn load(&self, offset: StackOffset, width: DataWidth) -> Result<Type, String> {
1173        if !offset.is_valid() {
1174            return Err("out of bounds load".to_string());
1175        }
1176        if offset.array_index() >= STACK_MAX_INDEX {
1177            return Err("out of bounds load".to_string());
1178        }
1179        if offset.sub_index() % width.bytes() != 0 {
1180            return Err("misaligned access".to_string());
1181        }
1182
1183        let index = offset.array_index();
1184        let loaded_type = self.get(index).clone();
1185        let result = if width == DataWidth::U64 {
1186            loaded_type
1187        } else {
1188            match loaded_type {
1189                Type::ScalarValue(data) => {
1190                    let sub_index = offset.sub_index();
1191                    let value = Self::extract_sub_value(data.value, sub_index, width.bytes());
1192                    let unknown_mask =
1193                        Self::extract_sub_value(data.unknown_mask, sub_index, width.bytes());
1194                    let unwritten_mask =
1195                        Self::extract_sub_value(data.unwritten_mask, sub_index, width.bytes());
1196                    let urange = U64Range::compute_range_for_bytes_swap(
1197                        0.into(),
1198                        data.urange,
1199                        0,
1200                        sub_index,
1201                        width.bytes(),
1202                    );
1203                    Type::ScalarValue(ScalarValueData::new(
1204                        value,
1205                        unknown_mask,
1206                        unwritten_mask,
1207                        urange,
1208                    ))
1209                }
1210                _ => return Err(format!("incorrect load of {} bytes", width.bytes())),
1211            }
1212        };
1213        if !result.is_initialized() {
1214            return Err("reading unwritten value from the stack".to_string());
1215        }
1216        Ok(result)
1217    }
1218}
1219
/// Two types are ordered with `t1` > `t2` if a proof that a program in a state `t1` finish is also
/// a proof that a program in a state `t2` finish.
impl PartialOrd for Stack {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        let mut result = Ordering::Equal;
        // Walk both sparse maps together, comparing `self.get(k)` with `other.get(k)` (absent
        // entries read as `Type::UNINITIALIZED`) and folding the per-key orderings together.
        // NOTE(review): `HashMap` iteration order is arbitrary, so the `k1 <= k2` advancing
        // below is not a sorted merge. It still appears sound: a key is compared exactly when
        // it is consumed, and the fold is order-insensitive — but confirm this is intentional.
        let mut data_iter1 = self.data.iter().peekable();
        let mut data_iter2 = other.data.iter().peekable();
        loop {
            let k1 = data_iter1.peek().map(|(k, _)| *k);
            let k2 = data_iter2.peek().map(|(k, _)| *k);
            let k = match (k1, k2) {
                // Both maps exhausted: the accumulated ordering is the answer.
                (None, None) => return Some(result),
                (Some(k), None) => {
                    data_iter1.next();
                    *k
                }
                (None, Some(k)) => {
                    data_iter2.next();
                    *k
                }
                (Some(k1), Some(k2)) => {
                    // Advance whichever side has the smaller current key (both on a tie),
                    // then compare at the smaller key.
                    if k1 <= k2 {
                        data_iter1.next();
                    }
                    if k2 <= k1 {
                        data_iter2.next();
                    }
                    *std::cmp::min(k1, k2)
                }
            };
            result = associate_orderings(result, self.get(k).partial_cmp(other.get(k))?)?;
        }
    }
}
1254
/// Logs one formatted line through the logger in `$verification_context`, prefixed with the
/// current pc of `$context` and the opcode of the instruction at that pc.
macro_rules! bpf_log {
    ($context:ident, $verification_context:ident, $($msg:tt)*) => {
        let prefix = format!("{}: ({:02x})", $context.pc, $verification_context.code[$context.pc].code());
        let suffix = format!($($msg)*);
        $verification_context.logger.log(format!("{prefix} {suffix}").as_bytes());
    }
}
1262
/// The state of the computation as known by the verifier at a given point in time.
#[derive(Debug, Default)]
struct ComputationContext {
    /// The program counter.
    pc: ProgramCounter,
    /// Registers 0 to 9. Higher register indices are handled specially in `reg()`.
    registers: [Type; GENERAL_REGISTER_COUNT as usize],
    /// The state of the stack.
    stack: Stack,
    /// The dynamically known bounds of buffers indexed by their ids.
    array_bounds: BTreeMap<MemoryId, u64>,
    /// The currently allocated resources.
    resources: HashSet<MemoryId>,
    /// The previous context in the computation.
    parent: Option<Arc<ComputationContext>>,
    /// The data dependencies of this context. This is used to broaden a known ending context to
    /// help cutting computation branches.
    dependencies: Mutex<Vec<DataDependencies>>,
}
1282
1283impl Clone for ComputationContext {
1284    fn clone(&self) -> Self {
1285        Self {
1286            pc: self.pc,
1287            registers: self.registers.clone(),
1288            stack: self.stack.clone(),
1289            array_bounds: self.array_bounds.clone(),
1290            resources: self.resources.clone(),
1291            parent: self.parent.clone(),
1292            // dependencies are erased as they must always be used on the same instance of the
1293            // context.
1294            dependencies: Default::default(),
1295        }
1296    }
1297}
1298
1299/// parent and dependencies are ignored for the comparison, as they do not matter for termination.
1300impl PartialEq for ComputationContext {
1301    fn eq(&self, other: &Self) -> bool {
1302        self.pc == other.pc
1303            && self.registers == other.registers
1304            && self.stack == other.stack
1305            && self.array_bounds == other.array_bounds
1306    }
1307}
1308
1309impl ComputationContext {
1310    /// Replace all instances of the NullOr type with the given `null_id` to either 0 or the
1311    /// subtype depending on `is_null`
1312    fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
1313        for i in 0..self.registers.len() {
1314            self.registers[i].set_null(null_id, is_null);
1315        }
1316        self.stack.set_null(null_id, is_null);
1317    }
1318
1319    fn reg(&self, index: Register) -> Result<Type, String> {
1320        if index >= REGISTER_COUNT {
1321            return Err(format!("R{index} is invalid"));
1322        }
1323        if index < GENERAL_REGISTER_COUNT {
1324            Ok(self.registers[index as usize].clone())
1325        } else {
1326            Ok(Type::PtrToStack { offset: StackOffset::default() })
1327        }
1328    }
1329
1330    fn set_reg(&mut self, index: Register, reg_type: Type) -> Result<(), String> {
1331        if index >= GENERAL_REGISTER_COUNT {
1332            return Err(format!("R{index} is invalid"));
1333        }
1334        self.registers[index as usize] = reg_type;
1335        Ok(())
1336    }
1337
1338    fn update_array_bounds(&mut self, id: MemoryId, new_bound: ScalarValueData) {
1339        let new_bound_min = new_bound.min();
1340        self.array_bounds
1341            .entry(id)
1342            .and_modify(|v| *v = std::cmp::max(*v, new_bound_min))
1343            .or_insert(new_bound_min);
1344    }
1345
1346    fn get_map_schema(&self, argument: u8) -> Result<MapSchema, String> {
1347        match self.reg(argument + 1)? {
1348            Type::ConstPtrToMap { schema, .. } => Ok(schema),
1349            _ => Err(format!("No map found at argument {argument}")),
1350        }
1351    }
1352
1353    fn next(&self) -> Result<Self, String> {
1354        let parent = Some(Arc::new(self.clone()));
1355        self.jump_with_offset(0, parent)
1356    }
1357
1358    /// Returns a new `ComputationContext` where the pc has jump by `offset + 1`. In particular,
1359    /// the next instruction is reached with `jump_with_offset(0)`.
1360    fn jump_with_offset(&self, offset: i16, parent: Option<Arc<Self>>) -> Result<Self, String> {
1361        let pc = self
1362            .pc
1363            .checked_add_signed(offset.into())
1364            .and_then(|v| v.checked_add_signed(1))
1365            .ok_or_else(|| "jump outside of program".to_string())?;
1366        let result = Self {
1367            pc,
1368            registers: self.registers.clone(),
1369            stack: self.stack.clone(),
1370            array_bounds: self.array_bounds.clone(),
1371            resources: self.resources.clone(),
1372            parent,
1373            dependencies: Default::default(),
1374        };
1375        Ok(result)
1376    }
1377
1378    fn check_memory_access(
1379        &self,
1380        dst_offset: ScalarValueData,
1381        dst_buffer_size: u64,
1382        instruction_offset: i16,
1383        width: usize,
1384    ) -> Result<(), String> {
1385        let memory_range = dst_offset.urange + instruction_offset + U64Range::new(0, width as u64);
1386        if memory_range.max > dst_buffer_size {
1387            return Err("out of bound access".to_string());
1388        }
1389        Ok(())
1390    }
1391
    /// Verifies a store of `value` through the pointer `addr` at `field`. The stack accepts
    /// any type; all other writable memory only accepts fully initialized scalar values so
    /// that pointers and private data cannot be leaked.
    fn store_memory(
        &mut self,
        context: &mut VerificationContext<'_>,
        addr: &Type,
        field: Field,
        value: Type,
    ) -> Result<(), String> {
        let addr = addr.inner(self)?;
        match *addr {
            Type::PtrToStack { offset } => {
                // The stack may hold any type (including pointers), so return early and skip
                // the scalar-only check below.
                let offset_sum = offset.add(field.offset);
                return self.stack.store(offset_sum, value, field.width);
            }
            Type::PtrToMemory { offset, buffer_size, .. } => {
                self.check_memory_access(offset, buffer_size, field.offset, field.width.bytes())?;
            }
            Type::PtrToStruct { ref id, offset, ref descriptor, .. } => {
                // Only fields declared mutable in the struct descriptor may be written.
                let field_desc = descriptor
                    .find_field(offset, field)
                    .ok_or_else(|| "incorrect store".to_string())?;

                if !matches!(field_desc.field_type, FieldType::MutableScalar { .. }) {
                    return Err("store to a read-only field".to_string());
                }

                // Record the access so the instruction can be remapped at link time.
                context.register_struct_access(StructAccess {
                    pc: self.pc,
                    memory_id: id.clone(),
                    field_offset: field_desc.offset,
                    is_32_bit_ptr_load: false,
                })?;
            }
            Type::PtrToArray { ref id, offset } => {
                // An array with no known bound defaults to size 0, rejecting any access.
                self.check_memory_access(
                    offset,
                    *self.array_bounds.get(&id).unwrap_or(&0),
                    field.offset,
                    field.width.bytes(),
                )?;
            }
            _ => return Err("incorrect store".to_string()),
        }

        // For all non-stack destinations, only fully initialized scalars may be written.
        match value {
            Type::ScalarValue(data) if data.is_fully_initialized() => {}
            // Private data should not be leaked.
            _ => return Err("incorrect store".to_string()),
        }
        Ok(())
    }
1442
    /// Verifies a load through the pointer `addr` at `field` and returns the type of the
    /// loaded value. Loads from plain memory and arrays produce unknown scalars; loads from
    /// structs resolve the field through the struct descriptor and may produce pointer types.
    fn load_memory(
        &self,
        context: &mut VerificationContext<'_>,
        addr: &Type,
        field: Field,
    ) -> Result<Type, String> {
        let addr = addr.inner(self)?;
        match *addr {
            Type::PtrToStack { offset } => {
                let offset_sum = offset.add(field.offset);
                self.stack.load(offset_sum, field.width)
            }
            Type::PtrToMemory { ref id, offset, buffer_size, .. } => {
                self.check_memory_access(offset, buffer_size, field.offset, field.width.bytes())?;
                Ok(Type::UNKNOWN_SCALAR)
            }
            Type::PtrToStruct { ref id, offset, ref descriptor, .. } => {
                let field_desc = descriptor
                    .find_field(offset, field)
                    .ok_or_else(|| "incorrect load".to_string())?;

                // Map the declared field type to the runtime type of the loaded value.
                // Pointer-typed fields get their memory id chained onto the struct's id so
                // that distinct instances remain distinguishable.
                let (return_type, is_32_bit_ptr_load) = match &field_desc.field_type {
                    FieldType::Scalar { .. } | FieldType::MutableScalar { .. } => {
                        (Type::UNKNOWN_SCALAR, false)
                    }
                    FieldType::PtrToArray { id: array_id, is_32_bit } => (
                        Type::PtrToArray { id: array_id.prepended(id.clone()), offset: 0.into() },
                        *is_32_bit,
                    ),
                    FieldType::PtrToEndArray { id: array_id, is_32_bit } => {
                        (Type::PtrToEndArray { id: array_id.prepended(id.clone()) }, *is_32_bit)
                    }
                    FieldType::PtrToMemory { id: memory_id, buffer_size, is_32_bit } => (
                        Type::PtrToMemory {
                            id: memory_id.prepended(id.clone()),
                            offset: 0.into(),
                            buffer_size: *buffer_size as u64,
                        },
                        *is_32_bit,
                    ),
                    FieldType::NullablePtrToMemory { id: memory_id, buffer_size, is_32_bit } => {
                        let id = memory_id.prepended(id.clone());
                        (
                            Type::NullOr {
                                id: id.clone(),
                                inner: Box::new(Type::PtrToMemory {
                                    id,
                                    offset: 0.into(),
                                    buffer_size: *buffer_size as u64,
                                }),
                            },
                            *is_32_bit,
                        )
                    }
                };

                // Record the access so the instruction can be remapped at link time.
                context.register_struct_access(StructAccess {
                    pc: self.pc,
                    memory_id: id.clone(),
                    field_offset: field_desc.offset,
                    is_32_bit_ptr_load,
                })?;

                Ok(return_type)
            }
            Type::PtrToArray { ref id, offset } => {
                // An array with no known bound defaults to size 0, rejecting any access.
                self.check_memory_access(
                    offset,
                    *self.array_bounds.get(&id).unwrap_or(&0),
                    field.offset,
                    field.width.bytes(),
                )?;
                Ok(Type::UNKNOWN_SCALAR)
            }
            _ => Err("incorrect load".to_string()),
        }
    }
1520
1521    /**
1522     * Given the given `return_value` in a method signature, return the concrete type to use,
1523     * updating the `next` context is needed.
1524     *
1525     * `maybe_null` is true is the computed value will be a descendant of a `NullOr` type.
1526     */
1527    fn resolve_return_value(
1528        &self,
1529        verification_context: &mut VerificationContext<'_>,
1530        return_value: &Type,
1531        next: &mut ComputationContext,
1532        maybe_null: bool,
1533    ) -> Result<Type, String> {
1534        match return_value {
1535            Type::AliasParameter { parameter_index } => self.reg(parameter_index + 1),
1536            Type::ReleasableParameter { id, inner } => {
1537                let id = MemoryId::from(verification_context.next_id()).prepended(id.clone());
1538                if !maybe_null {
1539                    next.resources.insert(id.clone());
1540                }
1541                Ok(Type::Releasable {
1542                    id,
1543                    inner: Box::new(self.resolve_return_value(
1544                        verification_context,
1545                        inner,
1546                        next,
1547                        maybe_null,
1548                    )?),
1549                })
1550            }
1551            Type::NullOrParameter(t) => {
1552                let id = verification_context.next_id();
1553                Ok(Type::NullOr {
1554                    id: id.into(),
1555                    inner: Box::new(self.resolve_return_value(
1556                        verification_context,
1557                        t,
1558                        next,
1559                        true,
1560                    )?),
1561                })
1562            }
1563            Type::MapValueParameter { map_ptr_index } => {
1564                let schema = self.get_map_schema(*map_ptr_index)?;
1565                let id = verification_context.next_id();
1566                Ok(Type::PtrToMemory {
1567                    id: id.into(),
1568                    offset: 0.into(),
1569                    buffer_size: schema.value_size as u64,
1570                })
1571            }
1572            Type::MemoryParameter { size, .. } => {
1573                let buffer_size = size.size(self)?;
1574                let id = verification_context.next_id();
1575                Ok(Type::PtrToMemory { id: id.into(), offset: 0.into(), buffer_size })
1576            }
1577            t => Ok(t.clone()),
1578        }
1579    }
1580
1581    fn compute_source(&self, src: Source) -> Result<Type, String> {
1582        match src {
1583            Source::Reg(reg) => self.reg(reg),
1584            Source::Value(v) => Ok(v.into()),
1585        }
1586    }
1587
1588    fn apply_computation(
1589        context: &ComputationContext,
1590        op1: Type,
1591        op2: Type,
1592        alu_type: AluType,
1593        op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
1594    ) -> Result<Type, String> {
1595        let result: Type = match (alu_type, op1.inner(context)?, op2.inner(context)?) {
1596            (_, Type::ScalarValue(data1), Type::ScalarValue(data2)) => op(*data1, *data2).into(),
1597            (
1598                AluType::Add,
1599                Type::ScalarValue(_),
1600                Type::PtrToStack { .. } | Type::PtrToMemory { .. } | Type::PtrToStruct { .. },
1601            ) => {
1602                return Self::apply_computation(context, op2, op1, alu_type, op);
1603            }
1604            (alu_type, Type::PtrToStack { offset: x }, Type::ScalarValue(data))
1605                if alu_type.is_ptr_compatible() =>
1606            {
1607                Type::PtrToStack { offset: run_on_stack_offset(*x, |x| op(x, *data)) }
1608            }
1609            (
1610                alu_type,
1611                Type::PtrToMemory { id, offset: x, buffer_size },
1612                Type::ScalarValue(data),
1613            ) if alu_type.is_ptr_compatible() => {
1614                let offset = op(*x, *data);
1615                Type::PtrToMemory { id: id.clone(), offset, buffer_size: *buffer_size }
1616            }
1617            (
1618                alu_type,
1619                Type::PtrToStruct { id, offset: x, descriptor },
1620                Type::ScalarValue(data),
1621            ) if alu_type.is_ptr_compatible() => {
1622                let offset = op(*x, *data);
1623                Type::PtrToStruct { id: id.clone(), offset, descriptor: descriptor.clone() }
1624            }
1625            (AluType::Add, Type::PtrToArray { id, offset: x }, Type::ScalarValue(data)) => {
1626                let offset = x.checked_add(*data).ok_or_else(|| format!("XXX"))?;
1627                Type::PtrToArray { id: id.clone(), offset }
1628            }
1629            (AluType::Sub, Type::PtrToArray { id, offset: x }, Type::ScalarValue(data)) => {
1630                let offset = x.checked_sub(*data).ok_or_else(|| format!("XXX"))?;
1631                Type::PtrToArray { id: id.clone(), offset }
1632            }
1633            (
1634                AluType::Sub,
1635                Type::PtrToMemory { id: id1, offset: x1, .. },
1636                Type::PtrToMemory { id: id2, offset: x2, .. },
1637            )
1638            | (
1639                AluType::Sub,
1640                Type::PtrToStruct { id: id1, offset: x1, .. },
1641                Type::PtrToStruct { id: id2, offset: x2, .. },
1642            )
1643            | (
1644                AluType::Sub,
1645                Type::PtrToArray { id: id1, offset: x1 },
1646                Type::PtrToArray { id: id2, offset: x2 },
1647            ) if id1 == id2 => Type::from(op(*x1, *x2)),
1648            (AluType::Sub, Type::PtrToStack { offset: x1 }, Type::PtrToStack { offset: x2 }) => {
1649                Type::from(op(x1.reg(), x2.reg()))
1650            }
1651            (
1652                AluType::Sub,
1653                Type::PtrToArray { id: id1, .. },
1654                Type::PtrToEndArray { id: id2, .. },
1655            )
1656            | (
1657                AluType::Sub,
1658                Type::PtrToEndArray { id: id1, .. },
1659                Type::PtrToArray { id: id2, .. },
1660            ) if id1 == id2 => Type::UNKNOWN_SCALAR,
1661            _ => Type::default(),
1662        };
1663        Ok(result)
1664    }
1665
1666    fn alu(
1667        &mut self,
1668        op_name: Option<&str>,
1669        verification_context: &mut VerificationContext<'_>,
1670        dst: Register,
1671        src: Source,
1672        alu_type: AluType,
1673        op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
1674    ) -> Result<(), String> {
1675        if let Some(op_name) = op_name {
1676            bpf_log!(
1677                self,
1678                verification_context,
1679                "{op_name} {}, {}",
1680                display_register(dst),
1681                display_source(src)
1682            );
1683        }
1684        let op1 = self.reg(dst)?;
1685        let op2 = self.compute_source(src)?;
1686        let result = Self::apply_computation(self, op1, op2, alu_type, op)?;
1687        let mut next = self.next()?;
1688        next.set_reg(dst, result)?;
1689        verification_context.states.push(next);
1690        Ok(())
1691    }
1692
1693    fn log_atomic_operation(
1694        &mut self,
1695        op_name: &str,
1696        verification_context: &mut VerificationContext<'_>,
1697        fetch: bool,
1698        dst: Register,
1699        offset: i16,
1700        src: Register,
1701    ) {
1702        bpf_log!(
1703            self,
1704            verification_context,
1705            "lock {}{} [{}{}], {}",
1706            if fetch { "fetch " } else { "" },
1707            op_name,
1708            display_register(dst),
1709            print_offset(offset),
1710            display_register(src),
1711        );
1712    }
1713
1714    fn raw_atomic_operation(
1715        &mut self,
1716        op_name: &str,
1717        verification_context: &mut VerificationContext<'_>,
1718        width: DataWidth,
1719        fetch: bool,
1720        dst: Register,
1721        offset: i16,
1722        src: Register,
1723        op: impl FnOnce(&ComputationContext, Type, Type) -> Result<Type, String>,
1724    ) -> Result<(), String> {
1725        self.log_atomic_operation(op_name, verification_context, fetch, dst, offset, src);
1726        let addr = self.reg(dst)?;
1727        let value = self.reg(src)?;
1728        let field = Field::new(offset, width);
1729        let loaded_type = self.load_memory(verification_context, &addr, field)?;
1730        let result = op(self, loaded_type.clone(), value)?;
1731        let mut next = self.next()?;
1732        next.store_memory(verification_context, &addr, field, result)?;
1733        if fetch {
1734            next.set_reg(src, loaded_type)?;
1735        }
1736        verification_context.states.push(next);
1737        Ok(())
1738    }
1739
1740    fn atomic_operation(
1741        &mut self,
1742        op_name: &str,
1743        verification_context: &mut VerificationContext<'_>,
1744        width: DataWidth,
1745        fetch: bool,
1746        dst: Register,
1747        offset: i16,
1748        src: Register,
1749        alu_type: AluType,
1750        op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
1751    ) -> Result<(), String> {
1752        self.raw_atomic_operation(
1753            op_name,
1754            verification_context,
1755            width,
1756            fetch,
1757            dst,
1758            offset,
1759            src,
1760            |context: &ComputationContext, v1: Type, v2: Type| {
1761                Self::apply_computation(context, v1, v2, alu_type, op)
1762            },
1763        )
1764    }
1765
    /// Verifies a compare-and-exchange atomic instruction at `[dst + offset]`.
    ///
    /// Modeled semantics: the value at the address is compared with `r0`; when they are
    /// equal, the value of `src` is stored, otherwise the memory is left unchanged. In
    /// both cases the loaded value ends up in `r0`. A successor state is pushed for each
    /// outcome that cannot be statically ruled out.
    fn raw_atomic_cmpxchg(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        dst: Register,
        offset: i16,
        src: Register,
        jump_width: JumpWidth,
        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
    ) -> Result<(), String> {
        self.log_atomic_operation(op_name, verification_context, true, dst, offset, src);
        let width = match jump_width {
            JumpWidth::W32 => DataWidth::U32,
            JumpWidth::W64 => DataWidth::U64,
        };
        let addr = self.reg(dst)?;
        let field = Field::new(offset, width);
        // From here on, `dst` denotes the type loaded from memory, not the register.
        let dst = self.load_memory(verification_context, &addr, field)?;
        let value = self.reg(src)?;
        let r0 = self.reg(0)?;
        let branch = self.compute_branch(jump_width, &dst, &r0, op)?;
        // r0 = dst: the comparison may succeed and the exchange takes place.
        if branch.unwrap_or(true) {
            let mut next = self.next()?;
            // Refine both types under the equality hypothesis.
            let (dst, r0) =
                Type::constraint(&mut next, JumpType::Eq, jump_width, dst.clone(), r0.clone())?;
            next.set_reg(0, dst)?;
            next.store_memory(verification_context, &addr, field, value)?;
            verification_context.states.push(next);
        }
        // r0 != dst: the comparison may fail and memory keeps the loaded value.
        if !branch.unwrap_or(false) {
            let mut next = self.next()?;
            let (dst, r0) = Type::constraint(&mut next, JumpType::Ne, jump_width, dst, r0)?;
            next.set_reg(0, dst.clone())?;
            next.store_memory(verification_context, &addr, field, dst)?;
            verification_context.states.push(next);
        }

        Ok(())
    }
    /// Verifies a byte-swap instruction (`be`/`le`) of the given `width` on `dst`.
    fn endianness<BO: ByteOrder>(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        bpf_log!(self, verification_context, "{op_name}{} {}", width.bits(), display_register(dst),);
        // Reinterprets the native-endian byte representation of the truncated value in the
        // `BO` byte order: a no-op when host and requested order match, a swap otherwise.
        let bit_op = |value: u64| match width {
            DataWidth::U16 => BO::read_u16((value as u16).as_bytes()) as u64,
            DataWidth::U32 => BO::read_u32((value as u32).as_bytes()) as u64,
            DataWidth::U64 => BO::read_u64(value.as_bytes()),
            _ => {
                panic!("Unexpected bit width for endianness operation");
            }
        };
        let value = self.reg(dst)?;
        let new_value = match value {
            // The swap is applied to the known bits and to both masks alike; the value
            // range is reset to the full range since a swap does not preserve it.
            Type::ScalarValue(data) => Type::ScalarValue(ScalarValueData::new(
                bit_op(data.value),
                bit_op(data.unknown_mask),
                bit_op(data.unwritten_mask),
                U64Range::max(),
            )),
            // Byte-swapping a non-scalar degrades it to the default type.
            _ => Type::default(),
        };
        let mut next = self.next()?;
        next.set_reg(dst, new_value)?;
        verification_context.states.push(next);
        Ok(())
    }
1838
    /// Statically evaluates the comparison `op` between `op1` and `op2`.
    ///
    /// Returns `Ok(Some(outcome))` when the branch is statically decided, `Ok(None)` when
    /// both outcomes remain possible, and an error when the two types cannot legally be
    /// compared.
    fn compute_branch(
        &self,
        jump_width: JumpWidth,
        op1: &Type,
        op2: &Type,
        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
    ) -> Result<Option<bool>, String> {
        match (jump_width, op1, op2) {
            // Two scalars: delegate to the comparison itself.
            (_, Type::ScalarValue(data1), Type::ScalarValue(data2)) => op(*data1, *data2),
            // A zero scalar against a maybe-null pointer: both outcomes are possible.
            (JumpWidth::W64, Type::ScalarValue(data), Type::NullOr { .. })
            | (JumpWidth::W64, Type::NullOr { .. }, Type::ScalarValue(data))
                if data.is_zero() =>
            {
                Ok(None)
            }

            // A known non-zero value against 0: model the operands as the scalars 1 and 0,
            // preserving the operand order.
            (JumpWidth::W64, Type::ScalarValue(data), t) if data.is_zero() && t.is_non_zero() => {
                op(1.into(), 0.into())
            }

            (JumpWidth::W64, t, Type::ScalarValue(data)) if data.is_zero() && t.is_non_zero() => {
                op(0.into(), 1.into())
            }

            // Stack pointers are compared through their offsets.
            (JumpWidth::W64, Type::PtrToStack { offset: x }, Type::PtrToStack { offset: y }) => {
                op(x.reg(), y.reg())
            }

            // Pointers into the same object (same memory id) are compared through their
            // offsets.
            (
                JumpWidth::W64,
                Type::PtrToMemory { id: id1, offset: x, .. },
                Type::PtrToMemory { id: id2, offset: y, .. },
            )
            | (
                JumpWidth::W64,
                Type::PtrToStruct { id: id1, offset: x, .. },
                Type::PtrToStruct { id: id2, offset: y, .. },
            )
            | (
                JumpWidth::W64,
                Type::PtrToArray { id: id1, offset: x, .. },
                Type::PtrToArray { id: id2, offset: y, .. },
            ) if *id1 == *id2 => op(*x, *y),

            // Comparing a position in an array with its end pointer cannot be decided
            // statically.
            (JumpWidth::W64, Type::PtrToArray { id: id1, .. }, Type::PtrToEndArray { id: id2 })
            | (JumpWidth::W64, Type::PtrToEndArray { id: id1 }, Type::PtrToArray { id: id2, .. })
                if *id1 == *id2 =>
            {
                Ok(None)
            }

            _ => Err(()),
        }
        .map_err(|_| "non permitted comparison".to_string())
    }
1894
    /// Verifies a conditional jump instruction.
    ///
    /// When the comparison cannot be statically decided, a successor state is pushed for
    /// the taken branch and one for the fall-through, each refined with the type
    /// constraints implied by the corresponding branch outcome.
    fn conditional_jump(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        dst: Register,
        src: Source,
        offset: i16,
        jump_type: JumpType,
        jump_width: JumpWidth,
        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
    ) -> Result<(), String> {
        bpf_log!(
            self,
            verification_context,
            "{op_name} {}, {}, {}",
            display_register(dst),
            display_source(src),
            if offset == 0 { format!("0") } else { print_offset(offset) },
        );
        let op1 = self.reg(dst)?;
        let op2 = self.compute_source(src.clone())?;
        // Refines the operand types under the given branch hypothesis and writes them back
        // into the successor state's registers.
        let apply_constraints_and_register = |mut next: Self,
                                              jump_type: JumpType|
         -> Result<Self, String> {
            if jump_type != JumpType::Unknown {
                let (new_op1, new_op2) =
                    Type::constraint(&mut next, jump_type, jump_width, op1.clone(), op2.clone())?;
                // Only write back when `dst` denotes an actual register.
                if dst < REGISTER_COUNT {
                    next.set_reg(dst, new_op1)?;
                }
                match src {
                    Source::Reg(r) => {
                        next.set_reg(r, new_op2)?;
                    }
                    _ => {
                        // Nothing to do
                    }
                }
            }
            Ok(next)
        };
        // `Some(b)` when the outcome is statically known, `None` when both are possible.
        let branch = self.compute_branch(jump_width, &op1, &op2, op)?;
        // Keep the current context alive as the parent of the successor states, for
        // dependency tracking.
        let parent = Some(Arc::new(self.clone()));
        if branch.unwrap_or(true) {
            // Do the jump
            verification_context.states.push(apply_constraints_and_register(
                self.jump_with_offset(offset, parent.clone())?,
                jump_type,
            )?);
        }
        if !branch.unwrap_or(false) {
            // Skip the jump
            verification_context.states.push(apply_constraints_and_register(
                self.jump_with_offset(0, parent)?,
                jump_type.invert(),
            )?);
        }
        Ok(())
    }
1954
    /// Handles the termination of a `ComputationContext`, performing branch cutting optimization.
    ///
    /// This method is called when it has been proven that the current context terminates (e.g.,
    /// reaches an `exit` instruction).
    ///
    /// The following steps are performed:
    /// 1. **Dependency Calculation:** The data dependencies of the context are computed based on
    ///    the dependencies of its terminated children and the instruction at the current PC.
    /// 2. **Context Broadening:** The context's state is broadened by clearing registers and stack
    ///    slots that are *not* in the calculated data dependencies. This optimization assumes that
    ///    data not used by the terminated branch is irrelevant for future execution paths.
    /// 3. **Termination Registration:** The broadened context is added to the set of terminating
    ///    contexts if it is not less than any existing terminating context at the same PC.
    /// 4. **Parent Termination:** If all the children of the current context have terminated,
    ///    its parent context is recursively terminated.
    fn terminate(self, verification_context: &mut VerificationContext<'_>) -> Result<(), String> {
        let mut next = Some(self);
        // Because of the potential length of the parent chain, do not use recursion.
        while let Some(mut current) = next.take() {
            // Take the parent to process it at the end and not keep it in the terminating
            // contexts.
            let parent = current.parent.take();

            // 1. Compute the dependencies of the context using the dependencies of its children
            //    and the actual operation.
            let mut dependencies = DataDependencies::default();
            for dependency in current.dependencies.get_mut().iter() {
                dependencies.merge(dependency);
            }

            dependencies.visit(
                &mut DataDependenciesVisitorContext {
                    calling_context: &verification_context.calling_context,
                    computation_context: &current,
                },
                verification_context.code[current.pc],
            )?;

            // 2. Clear the state depending on the dependencies states
            for register in 0..GENERAL_REGISTER_COUNT {
                if !dependencies.registers.contains(&register) {
                    current.set_reg(register, Default::default())?;
                }
            }
            current.stack.data.retain(|k, _| dependencies.stack.contains(k));

            // 3. Add the cleared state to the set of `terminating_contexts`
            let terminating_contexts =
                verification_context.terminating_contexts.entry(current.pc).or_default();
            let mut is_dominated = false;
            // Drop every stored context strictly less general than the current one, and
            // detect whether a stored context already dominates it.
            terminating_contexts.retain(|c| match c.computation_context.partial_cmp(&current) {
                Some(Ordering::Less) => false,
                Some(Ordering::Equal) | Some(Ordering::Greater) => {
                    // If the list contains a context greater or equal to the current one, it
                    // should not be added.
                    is_dominated = true;
                    true
                }
                _ => true,
            });
            if !is_dominated {
                terminating_contexts.push(TerminatingContext {
                    computation_context: current,
                    dependencies: dependencies.clone(),
                });
            }

            // 4. Register the computed dependencies in our parent, and terminate it if all
            //    dependencies have been computed.
            if let Some(parent) = parent {
                parent.dependencies.lock().push(dependencies);
                // To check whether all dependencies have been computed, rely on the fact that
                // the Arc count of the parent keeps track of how many dependencies are left:
                // `Arc::into_inner` only yields the parent once this was its last live child.
                next = Arc::into_inner(parent);
            }
        }
        Ok(())
    }
2033}
2034
2035impl Drop for ComputationContext {
2036    fn drop(&mut self) {
2037        let mut next = self.parent.take().and_then(Arc::into_inner);
2038        // Because of the potential length of the parent chain, do not use recursion.
2039        while let Some(mut current) = next {
2040            next = current.parent.take().and_then(Arc::into_inner);
2041        }
2042    }
2043}
2044
/// Two types are ordered with `t1` > `t2` if a proof that a program in a state `t1` finishes is
/// also a proof that a program in a state `t2` finishes.
impl PartialOrd for ComputationContext {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Contexts at different program counters or holding different resources are never
        // comparable.
        if self.pc != other.pc || self.resources != other.resources {
            return None;
        }
        // Combine the ordering of the stacks and of the register lists;
        // `associate_orderings` yields `None` as soon as two components disagree.
        let mut result = self.stack.partial_cmp(&other.stack)?;
        result = associate_orderings(
            result,
            Type::compare_list(self.registers.iter(), other.registers.iter())?,
        )?;
        // Merge-compare the two sorted `array_bounds` maps key by key. A context whose map
        // is exhausted first contributes `Greater`; for a common key the value comparison
        // is deliberately reversed (`v2.cmp(v1)`).
        let mut array_bound_iter1 = self.array_bounds.iter().peekable();
        let mut array_bound_iter2 = other.array_bounds.iter().peekable();
        let result = loop {
            match (array_bound_iter1.peek().cloned(), array_bound_iter2.peek().cloned()) {
                (None, None) => break result,
                (None, _) => break associate_orderings(result, Ordering::Greater)?,
                (_, None) => break associate_orderings(result, Ordering::Less)?,
                (Some((k1, v1)), Some((k2, v2))) => match k1.cmp(k2) {
                    Ordering::Equal => {
                        array_bound_iter1.next();
                        array_bound_iter2.next();
                        result = associate_orderings(result, v2.cmp(v1))?;
                    }
                    v @ Ordering::Less => {
                        array_bound_iter1.next();
                        result = associate_orderings(result, v)?;
                    }
                    v @ Ordering::Greater => {
                        array_bound_iter2.next();
                        result = associate_orderings(result, v)?;
                    }
                },
            }
        };
        Some(result)
    }
}
2084
/// Represents the read data dependencies of an eBPF program branch.
///
/// This struct tracks which registers and stack positions are *read* by the
/// instructions within a branch of the eBPF program.  This information is used
/// during branch cutting optimization to broaden terminating contexts.
///
/// The verifier assumes that data not read by a terminated branch is irrelevant
/// for future execution paths and can be safely cleared.
///
/// Instances start empty (`Default`) and are filled by visiting instructions and by
/// merging the dependencies of terminated child branches.
#[derive(Clone, Debug, Default)]
struct DataDependencies {
    /// The set of registers read by the children of a context.
    registers: HashSet<Register>,
    /// The stack positions read by the children of a context, keyed by stack array index.
    stack: HashSet<usize>,
}
2100
impl DataDependencies {
    /// Accumulates the dependencies of another (e.g. sibling) branch into this one.
    fn merge(&mut self, other: &DataDependencies) {
        self.registers.extend(other.registers.iter());
        self.stack.extend(other.stack.iter());
    }

    /// Updates the dependencies for an ALU instruction writing `dst`.
    fn alu(&mut self, dst: Register, src: Source) -> Result<(), String> {
        // Only do something if the dst is read, otherwise the computation doesn't matter.
        if self.registers.contains(&dst) {
            if let Source::Reg(src) = src {
                self.registers.insert(src);
            }
        }
        Ok(())
    }

    /// Updates the dependencies for a conditional jump: both operands are always read,
    /// since they decide which branch is taken.
    fn jmp(&mut self, dst: Register, src: Source) -> Result<(), String> {
        self.registers.insert(dst);
        if let Source::Reg(src) = src {
            self.registers.insert(src);
        }
        Ok(())
    }

    /// Updates the dependencies for an atomic read-modify-write at `[dst + offset]`.
    fn atomic(
        &mut self,
        context: &ComputationContext,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
        width: DataWidth,
        is_cmpxchg: bool,
    ) -> Result<(), String> {
        // The operation's result is consumed either through r0 (cmpxchg) or through `src`
        // (fetch variants), when the corresponding register is read downstream.
        let mut is_read = false;
        if is_cmpxchg && self.registers.contains(&0) {
            is_read = true;
        }
        if fetch && self.registers.contains(&src) {
            is_read = true;
        }
        let addr = context.reg(dst)?;
        if let Type::PtrToStack { offset: stack_offset } = addr {
            let stack_offset = stack_offset.add(offset);
            if !stack_offset.is_valid() {
                return Err(format!("Invalid stack offset at {}", context.pc));
            }
            // The touched stack slot stays (or becomes) live when the result is consumed
            // or when the slot itself is read downstream.
            if is_read || self.stack.contains(&stack_offset.array_index()) {
                is_read = true;
                self.stack.insert(stack_offset.array_index());
            }
        }
        if is_read {
            // The operation's inputs become live.
            self.registers.insert(0);
            self.registers.insert(src);
        }
        // The address register is always read.
        self.registers.insert(dst);
        Ok(())
    }
}
2161
/// Read-only state available to the `DataDependencies` visitor while it analyzes one
/// instruction.
struct DataDependenciesVisitorContext<'a> {
    /// Static, program-wide context (e.g. the registered helper function signatures).
    calling_context: &'a CallingContext,
    /// The verifier state at the instruction being visited.
    computation_context: &'a ComputationContext,
}
2166
2167impl BpfVisitor for DataDependencies {
2168    type Context<'a> = DataDependenciesVisitorContext<'a>;
2169
    // All ALU instructions (32- and 64-bit variants alike) propagate dependencies the same
    // way: the source register becomes live only when the destination is already read
    // downstream. See `DataDependencies::alu`.
    fn add<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn add64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn and<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn and64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn arsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn arsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn div<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn div64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn lsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn lsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn r#mod<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn mod64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn mul<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn mul64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn or<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn or64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn rsh<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn rsh64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn sub<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn sub64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn xor<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
    fn xor64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(dst, src)
    }
2346
    /// Register move, seen backwards by the dependency pass: if anything
    /// depends on `dst`, that dependency is transferred to `src`, and `dst`
    /// itself is no longer depended upon before this instruction.
    fn mov<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        // A self-move, or a move into a register nothing depends on, changes
        // nothing.
        if src == Source::Reg(dst) || !self.registers.contains(&dst) {
            return Ok(());
        }
        // A register source becomes depended upon; an immediate adds nothing.
        if let Source::Reg(src) = src {
            self.registers.insert(src);
        }
        self.registers.remove(&dst);
        Ok(())
    }
    /// 64-bit move: identical dependency behavior to the 32-bit form.
    fn mov64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.mov(context, dst, src)
    }
2370
    // In-place unary operations (negation, byte-order swaps): they read and
    // write the same register, so the dependency set is left untouched.
    fn neg<'a>(&mut self, _context: &mut Self::Context<'a>, _dst: Register) -> Result<(), String> {
        // This is reading and writing the same value. This induces no change in dependencies.
        Ok(())
    }
    fn neg64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        _dst: Register,
    ) -> Result<(), String> {
        // This is reading and writing the same value. This induces no change in dependencies.
        Ok(())
    }

    fn be<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        _dst: Register,
        _width: DataWidth,
    ) -> Result<(), String> {
        // This is reading and writing the same value. This induces no change in dependencies.
        Ok(())
    }
    fn le<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        _dst: Register,
        _width: DataWidth,
    ) -> Result<(), String> {
        // This is reading and writing the same value. This induces no change in dependencies.
        Ok(())
    }
2402
    /// Helper call: the argument registers are read, and the return register
    /// is overwritten by the call.
    fn call_external<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        index: u32,
    ) -> Result<(), String> {
        let Some(signature) = context.calling_context.helpers.get(&index).cloned() else {
            return Err(format!("unknown external function {}", index));
        };
        // 0 is overwritten and 1 to 5 are scratch registers
        // NOTE(review): only registers 0..=args.len() are cleared here, so
        // scratch registers above the argument count stay marked as depended
        // upon. That is conservative (never unsound) but imprecise — confirm
        // whether 1..=5 should all be cleared, as `load_from_packet` does.
        for register in 0..signature.args.len() + 1 {
            self.registers.remove(&(register as Register));
        }
        // 1 to k are parameters.
        for register in 0..signature.args.len() {
            self.registers.insert((register + 1) as Register);
        }
        Ok(())
    }
2421
    fn exit<'a>(&mut self, _context: &mut Self::Context<'a>) -> Result<(), String> {
        // This reads r0 unconditionally: the exit value is always observed.
        self.registers.insert(0);
        Ok(())
    }

    fn jump<'a>(&mut self, _context: &mut Self::Context<'a>, _offset: i16) -> Result<(), String> {
        // This doesn't do anything with values.
        Ok(())
    }
2432
    // Conditional jumps: for dependency purposes the predicate does not
    // matter — every variant reads `dst` and `src`, so they all delegate to
    // `self.jmp`.
    fn jeq<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jeq64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jne<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jne64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jge<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jge64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jgt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jgt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jle<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jle64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jlt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jlt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsge<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsge64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsgt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsgt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsle<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsle64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jslt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jslt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jset<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jset64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
2631
    // Atomic read-modify-write operations, all delegated to `self.atomic`
    // with the access width. The trailing flag is only set for
    // compare-exchange, which also always fetches (its `fetch` position is
    // hard-coded to `true`).
    fn atomic_add<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_add64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    fn atomic_and<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_and64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    fn atomic_or<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_or64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    fn atomic_xor<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_xor64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    fn atomic_xchg<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_xchg64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    fn atomic_cmpxchg<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, true, dst, offset, src, DataWidth::U32, true)
    }

    fn atomic_cmpxchg64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, true, dst, offset, src, DataWidth::U64, true)
    }
2761
    /// Memory load into `dst`: when the loaded value is depended upon and the
    /// address is a stack slot, that slot becomes depended upon too. The
    /// address register `src` is always marked as read.
    fn load<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        let context = &context.computation_context;
        if self.registers.contains(&dst) {
            let addr = context.reg(src)?;
            if let Type::PtrToStack { offset: stack_offset } = addr {
                let stack_offset = stack_offset.add(offset);
                if !stack_offset.is_valid() {
                    return Err(format!("Invalid stack offset at {}", context.pc));
                }
                self.stack.insert(stack_offset.array_index());
            }
        }
        // NOTE(review): unlike `mov`/`load64`, `dst` is not removed from the
        // dependency set even though the load overwrites it; this is
        // conservative — confirm it is intentional.
        self.registers.insert(src);
        Ok(())
    }
2784
    /// 64-bit immediate load (`lddw`): `dst` is overwritten with a constant,
    /// so nothing before this instruction is depended upon through `dst`.
    fn load64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        _src: u8,
        _lower: u32,
    ) -> Result<(), String> {
        self.registers.remove(&dst);
        Ok(())
    }
2795
    /// Packet load: clobbers the scratch registers r1-r5, and — when the
    /// result in `dst` is depended upon — makes the packet register `src` and
    /// the optional offset register depended upon instead.
    fn load_from_packet<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Register,
        _offset: i32,
        register_offset: Option<Register>,
        _width: DataWidth,
    ) -> Result<(), String> {
        // 1 to 5 are scratch registers
        for register in 1..6 {
            self.registers.remove(&(register as Register));
        }
        // Only do something if the dst is read, otherwise the computation doesn't matter.
        if self.registers.remove(&dst) {
            self.registers.insert(src);
            if let Some(reg) = register_offset {
                self.registers.insert(reg);
            }
        }
        Ok(())
    }
2818
2819    fn store<'a>(
2820        &mut self,
2821        context: &mut Self::Context<'a>,
2822        dst: Register,
2823        offset: i16,
2824        src: Source,
2825        width: DataWidth,
2826    ) -> Result<(), String> {
2827        let context = &context.computation_context;
2828        let addr = context.reg(dst)?;
2829        if let Type::PtrToStack { offset: stack_offset } = addr {
2830            let stack_offset = stack_offset.add(offset);
2831            if !stack_offset.is_valid() {
2832                return Err(format!("Invalid stack offset at {}", context.pc));
2833            }
2834            if self.stack.remove(&stack_offset.array_index()) {
2835                if let Source::Reg(src) = src {
2836                    self.registers.insert(src);
2837                }
2838            }
2839        } else {
2840            if let Source::Reg(src) = src {
2841                self.registers.insert(src);
2842            }
2843        }
2844
2845        Ok(())
2846    }
2847}
2848
/// A program state captured where execution terminates, together with the
/// data dependencies collected for it.
#[derive(Debug)]
struct TerminatingContext {
    // The abstract machine state at the terminating instruction.
    computation_context: ComputationContext,
    // The registers and stack slots this state depends on.
    dependencies: DataDependencies,
}
2854
/// Classifies an ALU operation by how it interacts with pointer operands.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum AluType {
    /// An operation only defined on scalars.
    Plain,
    /// Subtraction, which may involve a pointer operand.
    Sub,
    /// Addition, which may involve a pointer operand.
    Add,
}

impl AluType {
    /// Can this operation be done on a pointer and a scalar.
    fn is_ptr_compatible(&self) -> bool {
        // Only pointer arithmetic (addition/subtraction) is allowed.
        matches!(self, Self::Sub | Self::Add)
    }
}
2871
/// Operand width of a conditional jump (32-bit or 64-bit comparison).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum JumpWidth {
    W32,
    W64,
}
2877
/// The kind of comparison a conditional jump performs.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum JumpType {
    Eq,
    Ge,
    Gt,
    Le,
    LooseComparaison,
    Lt,
    Ne,
    StrictComparaison,
    Unknown,
}

impl JumpType {
    /// Returns the comparison that holds exactly when `self` does not.
    /// Inverting twice yields the original comparison; `Unknown` is its own
    /// inverse.
    fn invert(&self) -> Self {
        match self {
            Self::Eq => Self::Ne,
            Self::Ge => Self::Lt,
            Self::Gt => Self::Le,
            Self::Le => Self::Gt,
            Self::LooseComparaison => Self::StrictComparaison,
            Self::Lt => Self::Ge,
            Self::Ne => Self::Eq,
            Self::StrictComparaison => Self::LooseComparaison,
            Self::Unknown => Self::Unknown,
        }
    }

    /// Whether the comparison excludes equality.
    fn is_strict(&self) -> bool {
        matches!(self, Self::Gt | Self::Lt | Self::Ne | Self::StrictComparaison)
    }
}
2913
2914fn display_register(register: Register) -> String {
2915    format!("%r{register}")
2916}
2917
2918fn display_source(src: Source) -> String {
2919    match src {
2920        Source::Reg(r) => display_register(r),
2921        Source::Value(v) => format!("0x{v:x}"),
2922    }
2923}
2924
2925impl BpfVisitor for ComputationContext {
2926    type Context<'a> = VerificationContext<'a>;
2927
    // ALU instructions. The 32-bit forms wrap their computation in `alu32`
    // (32-bit operand semantics), the 64-bit forms act on the whole value.
    // `AluType::Add`/`AluType::Sub` flag the operations that are also legal on
    // pointers; everything else is `AluType::Plain`.
    fn add<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("add32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x + y))
    }
    fn add64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("add"), context, dst, src, AluType::Add, |x, y| x + y)
    }
    fn and<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("and32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x & y))
    }
    fn and64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("and"), context, dst, src, AluType::Plain, |x, y| x & y)
    }
    fn arsh<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("arsh32"), context, dst, src, AluType::Plain, |x, y| {
            alu32(x, y, |x, y| x.ashr(y))
        })
    }
    fn arsh64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("arsh"), context, dst, src, AluType::Plain, |x, y| x.ashr(y))
    }
    fn div<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("div32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x / y))
    }
    fn div64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("div"), context, dst, src, AluType::Plain, |x, y| x / y)
    }
    fn lsh<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("lsh32"), context, dst, src, AluType::Plain, |x, y| {
            alu32(x, y, |x, y| x << y)
        })
    }
    fn lsh64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("lsh"), context, dst, src, AluType::Plain, |x, y| x << y)
    }
    fn r#mod<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("mod32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x % y))
    }
    fn mod64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("mod"), context, dst, src, AluType::Plain, |x, y| x % y)
    }
    /// 32-bit move: the source is truncated to its low 32 bits. For a scalar,
    /// the value and both masks are truncated and the unsigned range is
    /// recomputed; any non-scalar source degrades to `Type::default()`.
    fn mov<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        bpf_log!(self, context, "mov32 {}, {}", display_register(dst), display_source(src));
        let src = self.compute_source(src)?;
        let value = match src {
            Type::ScalarValue(data) => {
                let value = (data.value as u32) as u64;
                let unknown_mask = (data.unknown_mask as u32) as u64;
                let unwritten_mask = (data.unwritten_mask as u32) as u64;
                // Recompute the range of the truncated value (the (0, 0, 4)
                // arguments presumably select the low 4 bytes — confirm
                // against `compute_range_for_bytes_swap`).
                let urange = U64Range::compute_range_for_bytes_swap(0.into(), data.urange, 0, 0, 4);
                Type::ScalarValue(ScalarValueData::new(value, unknown_mask, unwritten_mask, urange))
            }
            _ => Type::default(),
        };
        let mut next = self.next()?;
        next.set_reg(dst, value)?;
        context.states.push(next);
        Ok(())
    }
    /// 64-bit move: copies the computed source (scalar or pointer) into `dst`
    /// unchanged and queues the successor state.
    fn mov64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        bpf_log!(self, context, "mov {}, {}", display_register(dst), display_source(src));
        let src = self.compute_source(src)?;
        let mut next = self.next()?;
        next.set_reg(dst, src)?;
        context.states.push(next);
        Ok(())
    }
    // Remaining ALU instructions, same pattern as above: the 32-bit forms go
    // through `alu32`, the 64-bit forms act on the whole value.
    fn mul<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("mul32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x * y))
    }
    fn mul64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("mul"), context, dst, src, AluType::Plain, |x, y| x * y)
    }
    fn or<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("or32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x | y))
    }
    fn or64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("or"), context, dst, src, AluType::Plain, |x, y| x | y)
    }
    fn rsh<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("rsh32"), context, dst, src, AluType::Plain, |x, y| {
            alu32(x, y, |x, y| x >> y)
        })
    }
    fn rsh64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("rsh"), context, dst, src, AluType::Plain, |x, y| x >> y)
    }
    fn sub<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("sub32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x - y))
    }
    fn sub64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("sub"), context, dst, src, AluType::Sub, |x, y| x - y)
    }
    fn xor<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("xor32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x ^ y))
    }
    fn xor64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("xor"), context, dst, src, AluType::Plain, |x, y| x ^ y)
    }
3146
    // Negation is modeled as an ALU operation with a dummy zero source (the
    // closure ignores its second operand); byte swaps delegate to
    // `endianness` parameterized on the byte order.
    fn neg<'a>(&mut self, context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
        bpf_log!(self, context, "neg32 {}", display_register(dst));
        self.alu(None, context, dst, Source::Value(0), AluType::Plain, |x, y| {
            alu32(x, y, |x, _y| -x)
        })
    }
    fn neg64<'a>(&mut self, context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
        bpf_log!(self, context, "neg {}", display_register(dst));
        self.alu(None, context, dst, Source::Value(0), AluType::Plain, |x, _y| -x)
    }

    fn be<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        self.endianness::<BigEndian>("be", context, dst, width)
    }

    fn le<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        self.endianness::<LittleEndian>("le", context, dst, width)
    }
3175
    /// Validates a call to a registered helper: each argument register is
    /// checked against the helper's signature, then the call's effect is
    /// modeled — r0 receives the declared return type and r1-r5 are reset to
    /// the default (scratch) type.
    fn call_external<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        index: u32,
    ) -> Result<(), String> {
        bpf_log!(self, context, "call 0x{:x}", index);
        let Some(signature) = context.calling_context.helpers.get(&index).cloned() else {
            return Err(format!("unknown external function {}", index));
        };
        debug_assert!(signature.args.len() <= 5);
        let mut next = self.next()?;
        // Argument i lives in register i + 1.
        for (index, arg) in signature.args.iter().enumerate() {
            let reg = (index + 1) as u8;
            self.reg(reg)?.match_parameter_type(self, arg, index, &mut next)?;
        }
        // Parameters have been validated, specify the return value on return.
        if signature.invalidate_array_bounds {
            next.array_bounds.clear();
        }
        let value =
            self.resolve_return_value(context, &signature.return_value, &mut next, false)?;
        next.set_reg(0, value)?;
        for i in 1..=5 {
            next.set_reg(i, Type::default())?;
        }
        context.states.push(next);
        Ok(())
    }
3204
    /// Program exit: r0 must hold a fully-written scalar and every acquired
    /// resource must have been released; the final state is then recorded via
    /// `terminate`.
    fn exit<'a>(&mut self, context: &mut Self::Context<'a>) -> Result<(), String> {
        bpf_log!(self, context, "exit");
        if !self.reg(0)?.is_written_scalar() {
            return Err("register 0 is incorrect at exit time".to_string());
        }
        if !self.resources.is_empty() {
            return Err("some resources have not been released at exit time".to_string());
        }
        let this = self.clone();
        this.terminate(context)?;
        // Nothing to do, the program terminated with a valid scalar value.
        Ok(())
    }

    /// Unconditional jump: queues the state at the target, keeping the current
    /// state as the parent for state-chain bookkeeping.
    fn jump<'a>(&mut self, context: &mut Self::Context<'a>, offset: i16) -> Result<(), String> {
        bpf_log!(self, context, "ja {}", offset);
        let parent = Some(Arc::new(self.clone()));
        context.states.push(self.jump_with_offset(offset, parent)?);
        Ok(())
    }
3225
    // Conditional jumps on (in)equality. Each closure decides from the two
    // operands' ranges whether the comparison is always true (Some(true)),
    // always false (Some(false)), or undecidable (None); `conditional_jump`
    // uses the verdict when producing the successor states.
    fn jeq<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jeq32",
            context,
            dst,
            src,
            offset,
            JumpType::Eq,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    // x == y
                    // Both ranges are the same singleton: always equal.
                    if x.min == x.max && x.min == y.min && x.min == y.max {
                        return Some(true);
                    }
                    // Disjoint ranges: never equal.
                    if x.max < y.min || y.max < x.min {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jeq64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jeq",
            context,
            dst,
            src,
            offset,
            JumpType::Eq,
            JumpWidth::W64,
            |x, y| {
                comp64(x, y, |x, y| {
                    // x == y
                    if x.min == x.max && x.min == y.min && x.min == y.max {
                        return Some(true);
                    }
                    if x.max < y.min || y.max < x.min {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jne<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jne32",
            context,
            dst,
            src,
            offset,
            JumpType::Ne,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    // x != y
                    // Both ranges are the same singleton: never different.
                    if x.min == x.max && x.min == y.min && x.min == y.max {
                        return Some(false);
                    }
                    // Disjoint ranges: always different.
                    if x.max < y.min || y.max < x.min {
                        return Some(true);
                    }
                    None
                })
            },
        )
    }
    fn jne64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jne",
            context,
            dst,
            src,
            offset,
            JumpType::Ne,
            JumpWidth::W64,
            |x, y| {
                comp64(x, y, |x, y| {
                    // x != y
                    if x.min == x.max && x.min == y.min && x.min == y.max {
                        return Some(false);
                    }
                    if x.max < y.min || y.max < x.min {
                        return Some(true);
                    }
                    None
                })
            },
        )
    }
    // Unsigned >= jumps: decided from the operand ranges when possible
    // (Some(true)/Some(false)), None when the ranges overlap inconclusively.
    fn jge<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jge32",
            context,
            dst,
            src,
            offset,
            JumpType::Ge,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    // x >= y
                    if x.min >= y.max {
                        return Some(true);
                    }
                    if y.min > x.max {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jge64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jge",
            context,
            dst,
            src,
            offset,
            JumpType::Ge,
            JumpWidth::W64,
            |x, y| {
                comp64(x, y, |x, y| {
                    // x >= y
                    if x.min >= y.max {
                        return Some(true);
                    }
                    if y.min > x.max {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
3400    fn jgt<'a>(
3401        &mut self,
3402        context: &mut Self::Context<'a>,
3403        dst: Register,
3404        src: Source,
3405        offset: i16,
3406    ) -> Result<(), String> {
3407        self.conditional_jump(
3408            "jgt32",
3409            context,
3410            dst,
3411            src,
3412            offset,
3413            JumpType::Gt,
3414            JumpWidth::W32,
3415            |x, y| {
3416                comp32(x, y, |x, y| {
3417                    // x > y
3418                    if x.min > y.max {
3419                        return Some(true);
3420                    }
3421                    if y.min >= x.max {
3422                        return Some(false);
3423                    }
3424                    None
3425                })
3426            },
3427        )
3428    }
3429    fn jgt64<'a>(
3430        &mut self,
3431        context: &mut Self::Context<'a>,
3432        dst: Register,
3433        src: Source,
3434        offset: i16,
3435    ) -> Result<(), String> {
3436        self.conditional_jump(
3437            "jgt",
3438            context,
3439            dst,
3440            src,
3441            offset,
3442            JumpType::Gt,
3443            JumpWidth::W64,
3444            |x, y| {
3445                comp64(x, y, |x, y| {
3446                    // x > y
3447                    if x.min > y.max {
3448                        return Some(true);
3449                    }
3450                    if y.min >= x.max {
3451                        return Some(false);
3452                    }
3453                    None
3454                })
3455            },
3456        )
3457    }
3458    fn jle<'a>(
3459        &mut self,
3460        context: &mut Self::Context<'a>,
3461        dst: Register,
3462        src: Source,
3463        offset: i16,
3464    ) -> Result<(), String> {
3465        self.conditional_jump(
3466            "jle32",
3467            context,
3468            dst,
3469            src,
3470            offset,
3471            JumpType::Le,
3472            JumpWidth::W32,
3473            |x, y| {
3474                comp32(x, y, |x, y| {
3475                    // x <= y
3476                    if x.max <= y.min {
3477                        return Some(true);
3478                    }
3479                    if y.max < x.min {
3480                        return Some(false);
3481                    }
3482                    None
3483                })
3484            },
3485        )
3486    }
3487    fn jle64<'a>(
3488        &mut self,
3489        context: &mut Self::Context<'a>,
3490        dst: Register,
3491        src: Source,
3492        offset: i16,
3493    ) -> Result<(), String> {
3494        self.conditional_jump(
3495            "jle",
3496            context,
3497            dst,
3498            src,
3499            offset,
3500            JumpType::Le,
3501            JumpWidth::W64,
3502            |x, y| {
3503                comp64(x, y, |x, y| {
3504                    // x <= y
3505                    if x.max <= y.min {
3506                        return Some(true);
3507                    }
3508                    if y.max < x.min {
3509                        return Some(false);
3510                    }
3511                    None
3512                })
3513            },
3514        )
3515    }
3516    fn jlt<'a>(
3517        &mut self,
3518        context: &mut Self::Context<'a>,
3519        dst: Register,
3520        src: Source,
3521        offset: i16,
3522    ) -> Result<(), String> {
3523        self.conditional_jump(
3524            "jlt32",
3525            context,
3526            dst,
3527            src,
3528            offset,
3529            JumpType::Lt,
3530            JumpWidth::W32,
3531            |x, y| {
3532                comp32(x, y, |x, y| {
3533                    // x < y
3534                    if x.max < y.min {
3535                        return Some(true);
3536                    }
3537                    if y.max <= x.min {
3538                        return Some(false);
3539                    }
3540                    None
3541                })
3542            },
3543        )
3544    }
3545    fn jlt64<'a>(
3546        &mut self,
3547        context: &mut Self::Context<'a>,
3548        dst: Register,
3549        src: Source,
3550        offset: i16,
3551    ) -> Result<(), String> {
3552        self.conditional_jump(
3553            "jlt",
3554            context,
3555            dst,
3556            src,
3557            offset,
3558            JumpType::Lt,
3559            JumpWidth::W64,
3560            |x, y| {
3561                comp64(x, y, |x, y| {
3562                    // x < y
3563                    if x.max < y.min {
3564                        return Some(true);
3565                    }
3566                    if y.max <= x.min {
3567                        return Some(false);
3568                    }
3569                    None
3570                })
3571            },
3572        )
3573    }
3574    fn jsge<'a>(
3575        &mut self,
3576        context: &mut Self::Context<'a>,
3577        dst: Register,
3578        src: Source,
3579        offset: i16,
3580    ) -> Result<(), String> {
3581        self.conditional_jump(
3582            "jsge32",
3583            context,
3584            dst,
3585            src,
3586            offset,
3587            JumpType::LooseComparaison,
3588            JumpWidth::W32,
3589            |x, y| scomp32(x, y, |x, y| x >= y),
3590        )
3591    }
3592    fn jsge64<'a>(
3593        &mut self,
3594        context: &mut Self::Context<'a>,
3595        dst: Register,
3596        src: Source,
3597        offset: i16,
3598    ) -> Result<(), String> {
3599        self.conditional_jump(
3600            "jsge",
3601            context,
3602            dst,
3603            src,
3604            offset,
3605            JumpType::LooseComparaison,
3606            JumpWidth::W64,
3607            |x, y| scomp64(x, y, |x, y| x >= y),
3608        )
3609    }
3610    fn jsgt<'a>(
3611        &mut self,
3612        context: &mut Self::Context<'a>,
3613        dst: Register,
3614        src: Source,
3615        offset: i16,
3616    ) -> Result<(), String> {
3617        self.conditional_jump(
3618            "jsgt32",
3619            context,
3620            dst,
3621            src,
3622            offset,
3623            JumpType::StrictComparaison,
3624            JumpWidth::W32,
3625            |x, y| scomp32(x, y, |x, y| x > y),
3626        )
3627    }
3628    fn jsgt64<'a>(
3629        &mut self,
3630        context: &mut Self::Context<'a>,
3631        dst: Register,
3632        src: Source,
3633        offset: i16,
3634    ) -> Result<(), String> {
3635        self.conditional_jump(
3636            "jsgt",
3637            context,
3638            dst,
3639            src,
3640            offset,
3641            JumpType::StrictComparaison,
3642            JumpWidth::W64,
3643            |x, y| scomp64(x, y, |x, y| x > y),
3644        )
3645    }
3646    fn jsle<'a>(
3647        &mut self,
3648        context: &mut Self::Context<'a>,
3649        dst: Register,
3650        src: Source,
3651        offset: i16,
3652    ) -> Result<(), String> {
3653        self.conditional_jump(
3654            "jsle32",
3655            context,
3656            dst,
3657            src,
3658            offset,
3659            JumpType::LooseComparaison,
3660            JumpWidth::W32,
3661            |x, y| scomp32(x, y, |x, y| x <= y),
3662        )
3663    }
3664    fn jsle64<'a>(
3665        &mut self,
3666        context: &mut Self::Context<'a>,
3667        dst: Register,
3668        src: Source,
3669        offset: i16,
3670    ) -> Result<(), String> {
3671        self.conditional_jump(
3672            "jsle",
3673            context,
3674            dst,
3675            src,
3676            offset,
3677            JumpType::LooseComparaison,
3678            JumpWidth::W64,
3679            |x, y| scomp64(x, y, |x, y| x <= y),
3680        )
3681    }
3682    fn jslt<'a>(
3683        &mut self,
3684        context: &mut Self::Context<'a>,
3685        dst: Register,
3686        src: Source,
3687        offset: i16,
3688    ) -> Result<(), String> {
3689        self.conditional_jump(
3690            "jslt32",
3691            context,
3692            dst,
3693            src,
3694            offset,
3695            JumpType::StrictComparaison,
3696            JumpWidth::W32,
3697            |x, y| scomp32(x, y, |x, y| x < y),
3698        )
3699    }
3700    fn jslt64<'a>(
3701        &mut self,
3702        context: &mut Self::Context<'a>,
3703        dst: Register,
3704        src: Source,
3705        offset: i16,
3706    ) -> Result<(), String> {
3707        self.conditional_jump(
3708            "jslt",
3709            context,
3710            dst,
3711            src,
3712            offset,
3713            JumpType::StrictComparaison,
3714            JumpWidth::W64,
3715            |x, y| scomp64(x, y, |x, y| x < y),
3716        )
3717    }
3718    fn jset<'a>(
3719        &mut self,
3720        context: &mut Self::Context<'a>,
3721        dst: Register,
3722        src: Source,
3723        offset: i16,
3724    ) -> Result<(), String> {
3725        self.conditional_jump(
3726            "jset32",
3727            context,
3728            dst,
3729            src,
3730            offset,
3731            JumpType::Unknown,
3732            JumpWidth::W32,
3733            |x, y| {
3734                comp32(x, y, |x, y| {
3735                    // x & y != 0
3736                    if x.min != x.max || y.min != y.max {
3737                        return None;
3738                    }
3739                    Some(x.min & y.min != 0)
3740                })
3741            },
3742        )
3743    }
3744    fn jset64<'a>(
3745        &mut self,
3746        context: &mut Self::Context<'a>,
3747        dst: Register,
3748        src: Source,
3749        offset: i16,
3750    ) -> Result<(), String> {
3751        self.conditional_jump(
3752            "jset",
3753            context,
3754            dst,
3755            src,
3756            offset,
3757            JumpType::Unknown,
3758            JumpWidth::W64,
3759            |x, y| {
3760                comp64(x, y, |x, y| {
3761                    // x & y != 0
3762                    if x.min != x.max || y.min != y.max {
3763                        return None;
3764                    }
3765                    Some(x.min & y.min != 0)
3766                })
3767            },
3768        )
3769    }
3770
3771    fn atomic_add<'a>(
3772        &mut self,
3773        context: &mut Self::Context<'a>,
3774        fetch: bool,
3775        dst: Register,
3776        offset: i16,
3777        src: Register,
3778    ) -> Result<(), String> {
3779        self.atomic_operation(
3780            "add32",
3781            context,
3782            DataWidth::U32,
3783            fetch,
3784            dst,
3785            offset,
3786            src,
3787            AluType::Add,
3788            |x, y| alu32(x, y, |x, y| x + y),
3789        )
3790    }
3791
3792    fn atomic_add64<'a>(
3793        &mut self,
3794        context: &mut Self::Context<'a>,
3795        fetch: bool,
3796        dst: Register,
3797        offset: i16,
3798        src: Register,
3799    ) -> Result<(), String> {
3800        self.atomic_operation(
3801            "add",
3802            context,
3803            DataWidth::U64,
3804            fetch,
3805            dst,
3806            offset,
3807            src,
3808            AluType::Add,
3809            |x, y| x + y,
3810        )
3811    }
3812
3813    fn atomic_and<'a>(
3814        &mut self,
3815        context: &mut Self::Context<'a>,
3816        fetch: bool,
3817        dst: Register,
3818        offset: i16,
3819        src: Register,
3820    ) -> Result<(), String> {
3821        self.atomic_operation(
3822            "and32",
3823            context,
3824            DataWidth::U32,
3825            fetch,
3826            dst,
3827            offset,
3828            src,
3829            AluType::Plain,
3830            |x, y| alu32(x, y, |x, y| x & y),
3831        )
3832    }
3833
3834    fn atomic_and64<'a>(
3835        &mut self,
3836        context: &mut Self::Context<'a>,
3837        fetch: bool,
3838        dst: Register,
3839        offset: i16,
3840        src: Register,
3841    ) -> Result<(), String> {
3842        self.atomic_operation(
3843            "and",
3844            context,
3845            DataWidth::U64,
3846            fetch,
3847            dst,
3848            offset,
3849            src,
3850            AluType::Plain,
3851            |x, y| x & y,
3852        )
3853    }
3854
3855    fn atomic_or<'a>(
3856        &mut self,
3857        context: &mut Self::Context<'a>,
3858        fetch: bool,
3859        dst: Register,
3860        offset: i16,
3861        src: Register,
3862    ) -> Result<(), String> {
3863        self.atomic_operation(
3864            "or32",
3865            context,
3866            DataWidth::U32,
3867            fetch,
3868            dst,
3869            offset,
3870            src,
3871            AluType::Plain,
3872            |x, y| alu32(x, y, |x, y| x | y),
3873        )
3874    }
3875
3876    fn atomic_or64<'a>(
3877        &mut self,
3878        context: &mut Self::Context<'a>,
3879        fetch: bool,
3880        dst: Register,
3881        offset: i16,
3882        src: Register,
3883    ) -> Result<(), String> {
3884        self.atomic_operation(
3885            "or",
3886            context,
3887            DataWidth::U64,
3888            fetch,
3889            dst,
3890            offset,
3891            src,
3892            AluType::Plain,
3893            |x, y| x | y,
3894        )
3895    }
3896
3897    fn atomic_xor<'a>(
3898        &mut self,
3899        context: &mut Self::Context<'a>,
3900        fetch: bool,
3901        dst: Register,
3902        offset: i16,
3903        src: Register,
3904    ) -> Result<(), String> {
3905        self.atomic_operation(
3906            "xor32",
3907            context,
3908            DataWidth::U32,
3909            fetch,
3910            dst,
3911            offset,
3912            src,
3913            AluType::Plain,
3914            |x, y| alu32(x, y, |x, y| x ^ y),
3915        )
3916    }
3917
3918    fn atomic_xor64<'a>(
3919        &mut self,
3920        context: &mut Self::Context<'a>,
3921        fetch: bool,
3922        dst: Register,
3923        offset: i16,
3924        src: Register,
3925    ) -> Result<(), String> {
3926        self.atomic_operation(
3927            "xor",
3928            context,
3929            DataWidth::U64,
3930            fetch,
3931            dst,
3932            offset,
3933            src,
3934            AluType::Plain,
3935            |x, y| x ^ y,
3936        )
3937    }
3938
3939    fn atomic_xchg<'a>(
3940        &mut self,
3941        context: &mut Self::Context<'a>,
3942        fetch: bool,
3943        dst: Register,
3944        offset: i16,
3945        src: Register,
3946    ) -> Result<(), String> {
3947        self.atomic_operation(
3948            "xchg32",
3949            context,
3950            DataWidth::U32,
3951            fetch,
3952            dst,
3953            offset,
3954            src,
3955            AluType::Plain,
3956            |_, x| x,
3957        )
3958    }
3959
3960    fn atomic_xchg64<'a>(
3961        &mut self,
3962        context: &mut Self::Context<'a>,
3963        fetch: bool,
3964        dst: Register,
3965        offset: i16,
3966        src: Register,
3967    ) -> Result<(), String> {
3968        self.raw_atomic_operation(
3969            "xchg",
3970            context,
3971            DataWidth::U64,
3972            fetch,
3973            dst,
3974            offset,
3975            src,
3976            |_, _, x| Ok(x),
3977        )
3978    }
3979
3980    fn atomic_cmpxchg<'a>(
3981        &mut self,
3982        context: &mut Self::Context<'a>,
3983        dst: Register,
3984        offset: i16,
3985        src: Register,
3986    ) -> Result<(), String> {
3987        self.raw_atomic_cmpxchg("cmpxchg32", context, dst, offset, src, JumpWidth::W32, |x, y| {
3988            comp32(x, y, |x, y| {
3989                // x == y
3990                if x.min == x.max && x.min == y.min && x.min == y.max {
3991                    return Some(true);
3992                }
3993                if x.max < y.min || y.max < x.min {
3994                    return Some(false);
3995                }
3996                None
3997            })
3998        })
3999    }
4000
4001    fn atomic_cmpxchg64<'a>(
4002        &mut self,
4003        context: &mut Self::Context<'a>,
4004        dst: Register,
4005        offset: i16,
4006        src: Register,
4007    ) -> Result<(), String> {
4008        self.raw_atomic_cmpxchg("cmpxchg", context, dst, offset, src, JumpWidth::W64, |x, y| {
4009            comp64(x, y, |x, y| {
4010                // x == y
4011                if x.min == x.max && x.min == y.min && x.min == y.max {
4012                    return Some(true);
4013                }
4014                if x.max < y.min || y.max < x.min {
4015                    return Some(false);
4016                }
4017                None
4018            })
4019        })
4020    }
4021
    /// Verifies a register-indirect load: `dst = *(width*)(src + offset)`.
    ///
    /// The access check is delegated to `load_memory`, which also yields the
    /// type of the loaded value; the successor state is queued with `dst` set
    /// to that type.
    fn load<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        bpf_log!(
            self,
            context,
            "ldx{} {}, [{}{}]",
            width.str(),
            display_register(dst),
            display_register(src),
            print_offset(offset)
        );
        // Validate the access against the address type held in `src`.
        let addr = self.reg(src)?;
        let loaded_type = self.load_memory(context, &addr, Field::new(offset, width))?;
        // Queue the successor state with `dst` updated to the loaded type.
        let mut next = self.next()?;
        next.set_reg(dst, loaded_type)?;
        context.states.push(next);
        Ok(())
    }
4046
4047    fn load64<'a>(
4048        &mut self,
4049        context: &mut Self::Context<'a>,
4050        dst: Register,
4051        src: u8,
4052        lower: u32,
4053    ) -> Result<(), String> {
4054        let Some(next_instruction) = context.code.get(self.pc + 1) else {
4055            return Err(format!("incomplete lddw"));
4056        };
4057        if next_instruction.src_reg() != 0 || next_instruction.dst_reg() != 0 {
4058            return Err(format!("invalid lddw"));
4059        }
4060
4061        let value = match src {
4062            0 => {
4063                let value = (lower as u64) | (((next_instruction.imm() as u32) as u64) << 32);
4064                bpf_log!(self, context, "lddw {}, 0x{:x}", display_register(dst), value);
4065                Type::from(value)
4066            }
4067            BPF_PSEUDO_MAP_IDX => {
4068                let map_index = lower;
4069                bpf_log!(
4070                    self,
4071                    context,
4072                    "lddw {}, map_by_index({:x})",
4073                    display_register(dst),
4074                    map_index
4075                );
4076                context
4077                    .calling_context
4078                    .maps
4079                    .get(usize::try_from(map_index).unwrap())
4080                    .map(|schema| Type::ConstPtrToMap { id: map_index.into(), schema: *schema })
4081                    .ok_or_else(|| format!("lddw with invalid map index: {}", map_index))?
4082            }
4083            BPF_PSEUDO_MAP_IDX_VALUE => {
4084                let map_index = lower;
4085                let offset = next_instruction.imm();
4086                bpf_log!(
4087                    self,
4088                    context,
4089                    "lddw {}, map_value_by_index({:x})+{offset}",
4090                    display_register(dst),
4091                    map_index
4092                );
4093                let id = context.next_id();
4094                let map_schema = context
4095                    .calling_context
4096                    .maps
4097                    .get(usize::try_from(map_index).unwrap())
4098                    .ok_or_else(|| format!("lddw with invalid map index: {}", map_index))?;
4099
4100                if map_schema.map_type != bpf_map_type_BPF_MAP_TYPE_ARRAY {
4101                    return Err(format!(
4102                        "Invalid map type at index {map_index} for lddw. Expecting array."
4103                    ));
4104                }
4105                if map_schema.max_entries == 0 {
4106                    return Err(format!("Array has no entry."));
4107                }
4108
4109                Type::PtrToMemory {
4110                    id: MemoryId::from(id),
4111                    offset: offset.into(),
4112                    buffer_size: map_schema.value_size.into(),
4113                }
4114            }
4115            _ => {
4116                return Err(format!("invalid lddw"));
4117            }
4118        };
4119
4120        let parent = Some(Arc::new(self.clone()));
4121        let mut next = self.jump_with_offset(1, parent)?;
4122        next.set_reg(dst, value.into())?;
4123
4124        context.states.push(next);
4125        Ok(())
4126    }
4127
    /// Verifies a legacy packet load instruction (`ldp`).
    ///
    /// `src` must hold the calling context's packet pointer. The state forks:
    /// on success `dst` becomes an unknown scalar and R1-R5 are reset; the
    /// instruction may also terminate the program, so the exit invariants
    /// (R0 written, no unreleased resources) are checked as well.
    fn load_from_packet<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Register,
        offset: i32,
        register_offset: Option<Register>,
        width: DataWidth,
    ) -> Result<(), String> {
        bpf_log!(
            self,
            context,
            "ldp{} {}{}",
            width.str(),
            register_offset.map(display_register).unwrap_or_else(Default::default),
            print_offset(offset)
        );

        // Verify that `src` refers to a packet.
        let src_type = self.reg(src)?;
        let src_is_packet = match &context.calling_context.packet_type {
            Some(packet_type) => src_type == *packet_type,
            None => false,
        };
        if !src_is_packet {
            return Err(format!("R{} is not a packet", src));
        }

        // A register-based offset must at least hold a written scalar.
        if let Some(reg) = register_offset {
            let reg = self.reg(reg)?;
            if !reg.is_written_scalar() {
                return Err("access to unwritten offset".to_string());
            }
        }
        // Handle the case where the load succeed.
        let mut next = self.next()?;
        next.set_reg(dst, Type::UNKNOWN_SCALAR)?;
        // NOTE(review): R1-R5 are reset to their unwritten default here —
        // presumably the legacy packet load clobbers the scratch registers;
        // confirm against the runtime implementation.
        for i in 1..=5 {
            next.set_reg(i, Type::default())?;
        }
        context.states.push(next);
        // Handle the case where the load fails.
        if !self.reg(0)?.is_written_scalar() {
            return Err("register 0 is incorrect at exit time".to_string());
        }
        if !self.resources.is_empty() {
            return Err("some resources have not been released at exit time".to_string());
        }
        let this = self.clone();
        this.terminate(context)?;
        // Nothing to do, the program terminated with a valid scalar value.
        Ok(())
    }
4181
    /// Verifies a store to memory: `*(width*)(dst + offset) = src`, where
    /// `src` is either a register (`stx`) or an immediate value (`st`).
    fn store<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Source,
        width: DataWidth,
    ) -> Result<(), String> {
        // Resolve the stored value and log the disassembled instruction.
        let value = match src {
            Source::Reg(r) => {
                bpf_log!(
                    self,
                    context,
                    "stx{} [{}{}], {}",
                    width.str(),
                    display_register(dst),
                    print_offset(offset),
                    display_register(r),
                );
                self.reg(r)?
            }
            Source::Value(v) => {
                bpf_log!(
                    self,
                    context,
                    "st{} [{}{}], 0x{:x}",
                    width.str(),
                    display_register(dst),
                    print_offset(offset),
                    v,
                );
                // Immediates are masked down to the store width.
                Type::from(v & Type::mask(width))
            }
        };
        // Validate the write against the address type held in `dst`, then
        // queue the successor state.
        let mut next = self.next()?;
        let addr = self.reg(dst)?;
        next.store_memory(context, &addr, Field::new(offset, width), value)?;
        context.states.push(next);
        Ok(())
    }
4222}
4223
4224fn alu32(
4225    x: ScalarValueData,
4226    y: ScalarValueData,
4227    op: impl FnOnce(U32ScalarValueData, U32ScalarValueData) -> U32ScalarValueData,
4228) -> ScalarValueData {
4229    op(U32ScalarValueData::from(x), U32ScalarValueData::from(y)).into()
4230}
4231
4232fn comp64(
4233    x: ScalarValueData,
4234    y: ScalarValueData,
4235    op: impl FnOnce(U64Range, U64Range) -> Option<bool>,
4236) -> Result<Option<bool>, ()> {
4237    if !x.is_fully_initialized() || !y.is_fully_initialized() {
4238        return Err(());
4239    }
4240    Ok(op(x.urange, y.urange))
4241}
4242
4243fn comp32(
4244    x: ScalarValueData,
4245    y: ScalarValueData,
4246    op: impl FnOnce(U32Range, U32Range) -> Option<bool>,
4247) -> Result<Option<bool>, ()> {
4248    let x = U32ScalarValueData::from(x);
4249    let y = U32ScalarValueData::from(y);
4250    if !x.is_fully_initialized() || !y.is_fully_initialized() {
4251        return Err(());
4252    }
4253    Ok(op(x.urange, y.urange))
4254}
4255
4256fn scomp64(
4257    x: ScalarValueData,
4258    y: ScalarValueData,
4259    op: impl FnOnce(i64, i64) -> bool,
4260) -> Result<Option<bool>, ()> {
4261    if !x.is_fully_initialized() || !y.is_fully_initialized() {
4262        return Err(());
4263    }
4264    if !x.is_known() || !y.is_known() {
4265        return Ok(None);
4266    }
4267    Ok(Some(op(x.value as i64, y.value as i64)))
4268}
4269
4270fn scomp32(
4271    x: ScalarValueData,
4272    y: ScalarValueData,
4273    op: impl FnOnce(i32, i32) -> bool,
4274) -> Result<Option<bool>, ()> {
4275    let x = U32ScalarValueData::from(x);
4276    let y = U32ScalarValueData::from(y);
4277    if !x.is_fully_initialized() || !y.is_fully_initialized() {
4278        return Err(());
4279    }
4280    if !x.is_known() || !y.is_known() {
4281        return Ok(None);
4282    }
4283    Ok(Some(op(x.value as i32, y.value as i32)))
4284}
4285
/// Renders a signed displacement for disassembly output: empty for zero,
/// otherwise the value with an explicit sign (`+5`, `-3`).
fn print_offset<T: Into<i32>>(offset: T) -> String {
    match offset.into() {
        0 => String::new(),
        n if n > 0 => format!("+{n}"),
        n => format!("{n}"),
    }
}
4296
4297fn run_on_stack_offset<F>(v: StackOffset, f: F) -> StackOffset
4298where
4299    F: FnOnce(ScalarValueData) -> ScalarValueData,
4300{
4301    StackOffset(f(v.reg()))
4302}
4303
4304fn error_and_log<T>(
4305    logger: &mut dyn VerifierLogger,
4306    msg: impl std::string::ToString,
4307) -> Result<T, EbpfError> {
4308    let msg = msg.to_string();
4309    logger.log(msg.as_bytes());
4310    return Err(EbpfError::ProgramVerifyError(msg));
4311}
4312
/// Combines two partial-ordering verdicts: identical orderings associate to
/// themselves, `Equal` is the neutral element, and any other mix is
/// incomparable (`None`).
fn associate_orderings(o1: Ordering, o2: Ordering) -> Option<Ordering> {
    if o1 == o2 {
        Some(o1)
    } else if o2 == Ordering::Equal {
        Some(o1)
    } else if o1 == Ordering::Equal {
        Some(o2)
    } else {
        None
    }
}
4320
4321#[cfg(test)]
4322mod tests {
4323    use super::*;
4324    use std::collections::BTreeSet;
4325    use test_util::{assert_geq, assert_leq};
4326
4327    #[test]
4328    fn test_type_ordering() {
4329        let t0 = Type::from(0);
4330        let t1 = Type::from(1);
4331        let random = Type::AliasParameter { parameter_index: 8 };
4332        let unknown_written = Type::UNKNOWN_SCALAR;
4333        let unwritten = Type::default();
4334
4335        assert_eq!(t0.partial_cmp(&t0), Some(Ordering::Equal));
4336        assert_eq!(t0.partial_cmp(&t1), None);
4337        assert_eq!(t0.partial_cmp(&random), None);
4338        assert_eq!(t0.partial_cmp(&unknown_written), Some(Ordering::Less));
4339        assert_eq!(t0.partial_cmp(&unwritten), Some(Ordering::Less));
4340
4341        assert_eq!(t1.partial_cmp(&t0), None);
4342        assert_eq!(t1.partial_cmp(&t1), Some(Ordering::Equal));
4343        assert_eq!(t1.partial_cmp(&random), None);
4344        assert_eq!(t1.partial_cmp(&unknown_written), Some(Ordering::Less));
4345        assert_eq!(t1.partial_cmp(&unwritten), Some(Ordering::Less));
4346
4347        assert_eq!(random.partial_cmp(&t0), None);
4348        assert_eq!(random.partial_cmp(&t1), None);
4349        assert_eq!(random.partial_cmp(&random), Some(Ordering::Equal));
4350        assert_eq!(random.partial_cmp(&unknown_written), None);
4351        assert_eq!(random.partial_cmp(&unwritten), Some(Ordering::Less));
4352
4353        assert_eq!(unknown_written.partial_cmp(&t0), Some(Ordering::Greater));
4354        assert_eq!(unknown_written.partial_cmp(&t1), Some(Ordering::Greater));
4355        assert_eq!(unknown_written.partial_cmp(&random), None);
4356        assert_eq!(unknown_written.partial_cmp(&unknown_written), Some(Ordering::Equal));
4357        assert_eq!(unknown_written.partial_cmp(&unwritten), Some(Ordering::Less));
4358
4359        assert_eq!(unwritten.partial_cmp(&t0), Some(Ordering::Greater));
4360        assert_eq!(unwritten.partial_cmp(&t1), Some(Ordering::Greater));
4361        assert_eq!(unwritten.partial_cmp(&random), Some(Ordering::Greater));
4362        assert_eq!(unwritten.partial_cmp(&unknown_written), Some(Ordering::Greater));
4363        assert_eq!(unwritten.partial_cmp(&unwritten), Some(Ordering::Equal));
4364    }
4365
4366    #[test]
4367    fn test_stack_ordering() {
4368        let mut s1 = Stack::default();
4369        let mut s2 = Stack::default();
4370
4371        assert_eq!(s1.partial_cmp(&s2), Some(Ordering::Equal));
4372        s1.set(0, 0.into());
4373        assert_eq!(s1.partial_cmp(&s2), Some(Ordering::Less));
4374        assert_eq!(s2.partial_cmp(&s1), Some(Ordering::Greater));
4375        s2.set(1, 1.into());
4376        assert_eq!(s1.partial_cmp(&s2), None);
4377        assert_eq!(s2.partial_cmp(&s1), None);
4378    }
4379
    #[test]
    fn test_context_ordering() {
        // Exercises the partial order on `ComputationContext`.
        // NOTE(review): the assertion pattern below suggests that a context
        // with more (or larger) known `array_bounds` compares `Less` — i.e. it
        // is a refinement of the other — and that contexts at different
        // program counters are never comparable. Confirm against the
        // `PartialOrd` impl for `ComputationContext`.
        let mut c1 = ComputationContext::default();
        let mut c2 = ComputationContext::default();

        // Two default contexts compare equal.
        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Equal));

        // A bound known only on one side makes that side `Less`.
        c1.array_bounds.insert(1.into(), 5);
        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Less));
        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Greater));

        // Same key on both sides: the side with the larger bound (7 vs 5)
        // is `Less`, so c1 is now `Greater`.
        c2.array_bounds.insert(1.into(), 7);
        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));

        // c1 is stronger on key 2 but weaker on key 1: incomparable.
        c1.array_bounds.insert(2.into(), 9);
        assert_eq!(c1.partial_cmp(&c2), None);
        assert_eq!(c2.partial_cmp(&c1), None);

        // Matching the bound for key 2 restores comparability (key 1 decides).
        c2.array_bounds.insert(2.into(), 9);
        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));

        // An extra key on the already-`Less` side keeps the same ordering.
        c2.array_bounds.insert(3.into(), 12);
        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));

        // Contexts at different pcs are never comparable.
        c1.pc = 8;
        assert_eq!(c1.partial_cmp(&c2), None);
        assert_eq!(c2.partial_cmp(&c1), None);
    }
4411
4412    #[test]
4413    fn test_stack_access() {
4414        let mut s = Stack::default();
4415
4416        // Store data in the range [8, 26) and verify that `read_data_ptr()` fails for any
4417        // reads outside of that range.
4418        assert!(s.store(StackOffset(8.into()), Type::UNKNOWN_SCALAR, DataWidth::U64).is_ok());
4419        assert!(s.store(StackOffset(16.into()), Type::UNKNOWN_SCALAR, DataWidth::U64).is_ok());
4420        assert!(s.store(StackOffset(24.into()), Type::UNKNOWN_SCALAR, DataWidth::U16).is_ok());
4421
4422        for offset in 0..32 {
4423            for end in (offset + 1)..32 {
4424                assert_eq!(
4425                    s.read_data_ptr(2, StackOffset(offset.into()), (end - offset) as u64).is_ok(),
4426                    offset >= 8 && end <= 26
4427                );
4428            }
4429        }
4430
4431        // Verify that overflows are handled properly.
4432        assert!(s.read_data_ptr(2, StackOffset(12.into()), u64::MAX - 2).is_err());
4433    }
4434
4435    #[test]
4436    fn test_compute_range_for_bytes_swap() {
4437        // Build a list of interesting values. Interesting values are all possible combination of
4438        // bytes being either 0, max value, or an intermediary value.
4439        let mut values = BTreeSet::<u64>::default();
4440        for v1 in &[0x00, 0x1, u64::MAX] {
4441            for v2 in &[0x00, 0x1, u64::MAX] {
4442                for v3 in &[0x00, 0x1, u64::MAX] {
4443                    values.insert(U64Range::assemble_slices((*v1, *v2, *v3), 1, 1));
4444                }
4445            }
4446        }
4447        // Replace the second byte of old by the first byte of new and return the result.
4448        let store = |old: u64, new: u64| (old & !0xff00) | ((new & 0xff) << 8);
4449
4450        for old in &values {
4451            for new in &values {
4452                let s = store(*old, *new);
4453                for min_old in values.iter().filter(|v| *v <= old) {
4454                    for min_new in values.iter().filter(|v| *v <= new) {
4455                        for max_old in values.iter().filter(|v| *v >= old) {
4456                            for max_new in values.iter().filter(|v| *v >= new) {
4457                                let range = U64Range::compute_range_for_bytes_swap(
4458                                    U64Range::new(*min_old, *max_old),
4459                                    U64Range::new(*min_new, *max_new),
4460                                    1,
4461                                    0,
4462                                    1,
4463                                );
4464                                assert_leq!(range.min, s);
4465                                assert_geq!(range.max, s);
4466                            }
4467                        }
4468                    }
4469                }
4470            }
4471        }
4472    }
4473}