Skip to main content

ebpf/
verifier.rs

1// Copyright 2024 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#![allow(unused_variables)]
6
7use crate::scalar_value::{ScalarValueData, U32Range, U32ScalarValueData, U64Range};
8use crate::visitor::{BpfVisitor, ProgramCounter, Register, Source};
9use crate::{
10    BPF_MAX_INSTS, BPF_PSEUDO_MAP_IDX, BPF_PSEUDO_MAP_IDX_VALUE, BPF_STACK_SIZE, DataWidth,
11    EbpfError, EbpfInstruction, GENERAL_REGISTER_COUNT, MapSchema, REGISTER_COUNT,
12};
13use byteorder::{BigEndian, ByteOrder, LittleEndian, NativeEndian};
14use fuchsia_sync::Mutex;
15use linux_uapi::bpf_map_type_BPF_MAP_TYPE_ARRAY;
16use std::cmp::Ordering;
17use std::collections::{BTreeMap, HashMap, HashSet};
18use std::sync::Arc;
19use zerocopy::IntoBytes;
20
/// `u32::MAX` widened to `u64`, used for unsigned 32-bit range checks on 64-bit scalars.
const U32_MAX: u64 = u32::MAX as u64;
22
/// A trait to receive the log from the verifier.
pub trait VerifierLogger {
    /// Log a line. The line is always a correctly encoded ASCII string.
    fn log(&mut self, line: &[u8]);
}
28
/// A `VerifierLogger` that drops all its content. Useful when no verification log is needed.
pub struct NullVerifierLogger;
31
impl VerifierLogger for NullVerifierLogger {
    fn log(&mut self, line: &[u8]) {
        // The line is intentionally discarded; in debug builds, still check that the
        // caller upheld the ASCII contract documented on `VerifierLogger::log`.
        debug_assert!(line.is_ascii());
    }
}
37
/// An identifier for a memory buffer accessible by an ebpf program. The identifiers are built as a
/// chain of unique identifiers so that a buffer can contain multiple pointers to the same type and
/// the verifier can distinguish between the different instances.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct MemoryId {
    /// The unique numeric identifier of this buffer.
    id: u64,
    /// The next identifier in the chain, if any (see `prepended`).
    parent: Option<Box<MemoryId>>,
}
46
impl MemoryId {
    /// Returns the numeric id of this buffer, ignoring the parent chain.
    pub fn id(&self) -> u64 {
        self.id
    }
}
52
impl From<u64> for MemoryId {
    /// Builds a `MemoryId` with no parent chain from a raw numeric id.
    fn from(id: u64) -> Self {
        Self { id, parent: None }
    }
}
58
/// A counter that allows to generate new ids for parameters. The namespace is the same as for ids
/// generated for types while verifying an ebpf program, but it starts at u64::MAX / 2 and so is
/// guaranteed to never collide because the number of instructions of an ebpf program is bounded.
static BPF_TYPE_IDENTIFIER_COUNTER: std::sync::atomic::AtomicU64 =
    std::sync::atomic::AtomicU64::new(u64::MAX / 2);
64
65impl MemoryId {
66    pub fn new() -> MemoryId {
67        Self {
68            id: BPF_TYPE_IDENTIFIER_COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
69            parent: None,
70        }
71    }
72
73    /// Build a new id such that `other` is prepended to the chain of parent of `self`.
74    fn prepended(&self, other: MemoryId) -> Self {
75        match &self.parent {
76            None => MemoryId { id: self.id, parent: Some(Box::new(other)) },
77            Some(parent) => {
78                MemoryId { id: self.id, parent: Some(Box::new(parent.prepended(other))) }
79            }
80        }
81    }
82
83    /// Returns whether the parent of `self` is `parent`
84    fn has_parent(&self, parent: &MemoryId) -> bool {
85        match &self.parent {
86            None => false,
87            Some(p) => p.as_ref() == parent,
88        }
89    }
90
91    /// Returns whether `self` and `other` represent the same type. This is true if both have the
92    /// same id and if they both have parent, parents must also match.
93    fn matches(&self, other: &MemoryId) -> bool {
94        if self.id != other.id {
95            return false;
96        };
97        match (&self.parent, &other.parent) {
98            (Some(p1), Some(p2)) => p1.matches(p2.as_ref()),
99            _ => true,
100        }
101    }
102}
103
/// The type of a field in a struct pointed by `Type::PtrToStruct`.
///
/// For pointer fields, `is_32_bit` selects whether the stored pointer occupies 4 or 8 bytes
/// (see `FieldDescriptor::size`).
#[derive(Clone, Debug, PartialEq)]
pub enum FieldType {
    /// Read-only scalar value.
    Scalar { size: usize },

    /// Mutable scalar value.
    MutableScalar { size: usize },

    /// A pointer to the kernel memory. The full buffer is `buffer_size` bytes long.
    PtrToMemory { is_32_bit: bool, id: MemoryId, buffer_size: usize },

    /// A nullable pointer to the kernel memory. The full buffer is `buffer_size` bytes long.
    NullablePtrToMemory { is_32_bit: bool, id: MemoryId, buffer_size: usize },

    /// A pointer to the kernel memory. The full buffer size is determined by an instance of
    /// `PtrToEndArray` with the same `id`.
    PtrToArray { is_32_bit: bool, id: MemoryId },

    /// A pointer to the kernel memory that represents the first non accessible byte of a
    /// `PtrToArray` with the same `id`.
    PtrToEndArray { is_32_bit: bool, id: MemoryId },
}
127
/// Definition of a field in a `Type::PtrToStruct`.
#[derive(Clone, Debug, PartialEq)]
pub struct FieldDescriptor {
    /// The offset (in bytes) at which the field is located.
    pub offset: usize,

    /// The type of the pointed memory. Currently the verifier supports only `PtrToArray`,
    /// `PtrToStruct`, `PtrToEndArray` and `ScalarValue`.
    pub field_type: FieldType,
}
138
139impl FieldDescriptor {
140    fn size(&self) -> usize {
141        match self.field_type {
142            FieldType::Scalar { size } | FieldType::MutableScalar { size } => size,
143            FieldType::PtrToMemory { is_32_bit, .. }
144            | FieldType::NullablePtrToMemory { is_32_bit, .. }
145            | FieldType::PtrToArray { is_32_bit, .. }
146            | FieldType::PtrToEndArray { is_32_bit, .. } => {
147                if is_32_bit {
148                    4
149                } else {
150                    8
151                }
152            }
153        }
154    }
155}
156
/// The offset and width of a field in a struct.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct Field {
    /// Byte offset of the access, relative to the pointer base (see `StructDescriptor::find_field`).
    offset: i16,
    /// Width of the access.
    width: DataWidth,
}
163
impl Field {
    /// Creates a new `Field` from an access offset and width.
    fn new(offset: i16, width: DataWidth) -> Self {
        Self { offset, width }
    }
}
169
/// Defines field layout in a struct pointed by `Type::PtrToStruct`.
#[derive(Debug, PartialEq, Default)]
pub struct StructDescriptor {
    /// The list of fields.
    pub fields: Vec<FieldDescriptor>,
}
176
177impl StructDescriptor {
178    /// Return true if `self` is a subtype of `super_struct`.
179    fn is_subtype(&self, super_struct: &StructDescriptor) -> bool {
180        // Check that `super_struct.fields` is a subset of `fields`.
181        for super_field in super_struct.fields.iter() {
182            if self.fields.iter().find(|field| *field == super_field).is_none() {
183                return false;
184            }
185        }
186        true
187    }
188
189    /// Finds the field type for load/store at the specified location. None is returned if the
190    /// access is invalid and the program must be rejected.
191    fn find_field(&self, base_offset: ScalarValueData, field: Field) -> Option<&FieldDescriptor> {
192        let offset = base_offset + field.offset;
193        let field_desc = self.fields.iter().find(|f| {
194            f.offset <= offset.min() as usize && (offset.max() as usize) < f.offset + f.size()
195        })?;
196        let is_valid_load = match field_desc.field_type {
197            FieldType::Scalar { size } | FieldType::MutableScalar { size } => {
198                // For scalars check that the access is within the bounds of the field.
199                ((offset + field.width.bytes()).max() as usize) <= field_desc.offset + size
200            }
201            FieldType::PtrToMemory { is_32_bit, .. }
202            | FieldType::NullablePtrToMemory { is_32_bit, .. }
203            | FieldType::PtrToArray { is_32_bit, .. }
204            | FieldType::PtrToEndArray { is_32_bit, .. } => {
205                let expected_width = if is_32_bit { DataWidth::U32 } else { DataWidth::U64 };
206                // Pointer loads are expected to load the whole field.
207                offset.is_known()
208                    && offset.value as usize == field_desc.offset
209                    && field.width == expected_width
210            }
211        };
212
213        is_valid_load.then_some(field_desc)
214    }
215}
216
/// How the size of a memory buffer passed to a helper function is determined.
#[derive(Clone, Debug, PartialEq)]
pub enum MemoryParameterSize {
    /// The memory buffer has the given fixed size.
    Value(u64),
    /// The memory buffer size is given by the parameter at the given index.
    Reference { index: u8 },
}
224
impl MemoryParameterSize {
    /// Resolves the buffer size in bytes for a helper call verified in `context`.
    ///
    /// For `Reference`, the size is read from register `index + 1` (the register holding
    /// the argument at `index`); it must be a fully known scalar, otherwise an error is
    /// returned and the program is rejected.
    fn size(&self, context: &ComputationContext) -> Result<u64, String> {
        match self {
            Self::Value(size) => Ok(*size),
            Self::Reference { index } => {
                let size_type = context.reg(index + 1)?;
                match size_type {
                    Type::ScalarValue(data) if data.is_known() => Ok(data.value),
                    _ => Err("cannot know buffer size".to_string()),
                }
            }
        }
    }
}
239
/// The type of a value tracked by the verifier: either a concrete runtime value (scalar,
/// pointer, releasable object) or a constraint on a helper-function parameter/return value.
#[derive(Clone, Debug, PartialEq)]
pub enum Type {
    /// A number.
    ScalarValue(ScalarValueData),
    /// A pointer to a map object.
    ConstPtrToMap { id: u64, schema: MapSchema },
    /// A pointer into the stack.
    PtrToStack { offset: StackOffset },
    /// A pointer to the kernel memory. The full buffer is `buffer_size` bytes long. The pointer is
    /// situated at `offset` from the start of the buffer.
    PtrToMemory { id: MemoryId, offset: ScalarValueData, buffer_size: u64 },
    /// A pointer to a struct with the specified `StructDescriptor`.
    PtrToStruct { id: MemoryId, offset: ScalarValueData, descriptor: Arc<StructDescriptor> },
    /// A pointer to the kernel memory. The full buffer size is determined by an instance of
    /// `PtrToEndArray` with the same `id`. The pointer is situated at `offset` from the start of
    /// the buffer.
    PtrToArray { id: MemoryId, offset: ScalarValueData },
    /// A pointer to the kernel memory that represents the first non accessible byte of a
    /// `PtrToArray` with the same `id`.
    PtrToEndArray { id: MemoryId },
    /// A pointer that might be null and must be validated before being dereferenced.
    NullOr { id: MemoryId, inner: Box<Type> },
    /// An object that must be passed to a method with an associated `ReleaseParameter` before the
    /// end of the program.
    Releasable { id: MemoryId, inner: Box<Type> },
    /// A function parameter that must be a `ScalarValue` when called.
    ScalarValueParameter,
    /// A function parameter that must be a `ConstPtrToMap` when called.
    ConstPtrToMapParameter,
    /// A function parameter that must be a key of a map.
    MapKeyParameter {
        /// The index in the arguments list that contains a `ConstPtrToMap` for the map this key is
        /// associated with.
        map_ptr_index: u8,
    },
    /// A function parameter that must be a value of a map.
    MapValueParameter {
        /// The index in the arguments list that contains a `ConstPtrToMap` for the map this value
        /// is associated with.
        map_ptr_index: u8,
    },
    /// A function parameter that must be a pointer to memory.
    MemoryParameter {
        /// The size of the memory buffer, either fixed or read from another argument.
        size: MemoryParameterSize,
        /// Whether this memory is read by the function.
        input: bool,
        /// Whether this memory is written by the function.
        output: bool,
    },
    /// A function return value that is the same type as a parameter.
    AliasParameter {
        /// The index in the argument list of the parameter that has the type of this return value.
        parameter_index: u8,
    },
    /// A function return value that is either null, or the given type.
    NullOrParameter(Box<Type>),
    /// A function parameter that must be a pointer to memory with the given id.
    StructParameter { id: MemoryId },
    /// A function return value that must be passed to a method with an associated
    /// `ReleaseParameter` before the end of the program.
    ReleasableParameter { id: MemoryId, inner: Box<Type> },
    /// A function parameter that will release the value.
    ReleaseParameter { id: MemoryId },
    /// A function parameter that is not checked.
    AnyParameter,
}
308
309/// Defines a partial ordering on `Type` instances, capturing the notion of how "broad"
310/// a type is in terms of the set of potential values it represents.
311///
312/// The ordering is defined such that `t1 > t2` if a proof that an eBPF program terminates
313/// in a state where a register or memory location has type `t1` is also a proof that
314/// the program terminates in a state where that location has type `t2`.
315///
316/// In other words, a "broader" type represents a larger set of possible values, and
317/// proving termination with a broader type implies termination with any narrower type.
318///
319/// Examples:
320/// * `Type::ScalarValue(ScalarValueData({ unknown_mask: 0, .. }))` (a known scalar value) is less than
321///   `Type::ScalarValue(ScalarValueData({ unknown_mask: u64::MAX, .. }))` (an unknown scalar value).
322impl PartialOrd for Type {
323    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
324        // If the values are equals, return the known result.
325        if self == other {
326            return Some(Ordering::Equal);
327        }
328
329        // If one value is not initialized, the types are ordered.
330        if self == &Type::UNINITIALIZED {
331            return Some(Ordering::Greater);
332        }
333        if other == &Type::UNINITIALIZED {
334            return Some(Ordering::Less);
335        }
336
337        // Otherwise, only scalars are comparables.
338        match (self, other) {
339            (Self::ScalarValue(data1), Self::ScalarValue(data2)) => data1.partial_cmp(data2),
340            _ => None,
341        }
342    }
343}
344
impl From<ScalarValueData> for Type {
    /// Wraps scalar value data into the `ScalarValue` variant.
    fn from(value: ScalarValueData) -> Self {
        Self::ScalarValue(value)
    }
}
350
impl From<u64> for Type {
    /// Builds a scalar from a constant, via `ScalarValueData: From<u64>`.
    fn from(value: u64) -> Self {
        Self::ScalarValue(value.into())
    }
}
356
357impl Default for Type {
358    /// A new instance of `Type` where no bit has been written yet.
359    fn default() -> Self {
360        Self::UNINITIALIZED.clone()
361    }
362}
363
364impl Type {
365    /// An uninitialized value.
366    pub const UNINITIALIZED: Self = Self::ScalarValue(ScalarValueData::UNINITIALIZED);
367
368    /// A `Type` where the data is usable by userspace, but the value is not known by the verifier.
369    pub const UNKNOWN_SCALAR: Self = Self::ScalarValue(ScalarValueData::UNKNOWN_WRITTEN);
370
371    /// The mask associated with a data of size `width`.
372    fn mask(width: DataWidth) -> u64 {
373        if width == DataWidth::U64 { u64::MAX } else { (1 << width.bits()) - 1 }
374    }
375
376    /// Return true if `self` is a scalar with all bytes initialized.
377    fn is_written_scalar(&self) -> bool {
378        match self {
379            Self::ScalarValue(data) if data.is_fully_initialized() => true,
380            _ => false,
381        }
382    }
383
384    /// Return true if `self` has all bytes initialized.
385    fn is_initialized(&self) -> bool {
386        match self {
387            Self::ScalarValue(data) => data.is_fully_initialized(),
388            _ => true,
389        }
390    }
391
392    /// Return true if `self` is a subtype of `super_type`.
393    pub fn is_subtype(&self, super_type: &Type) -> bool {
394        match (self, super_type) {
395            // Anything can be used in place of an uninitialized value.
396            (_, Self::ScalarValue(data)) if data.is_uninitialized() => true,
397
398            (
399                Self::PtrToStruct { id: id1, offset: offset1, descriptor: descriptor1 },
400                Self::PtrToStruct { id: id2, offset: offset2, descriptor: descriptor2 },
401            ) => id1 == id2 && offset1 <= offset2 && descriptor1.is_subtype(descriptor2),
402
403            // Every type is a subtype of itself.
404            (self_type, super_type) if self_type == super_type => true,
405
406            _ => false,
407        }
408    }
409
410    /// Return true is `self` is guaranteed to be non-zero
411    pub fn is_non_zero(&self) -> bool {
412        match self {
413            Self::ScalarValue(d) => d.min() > 0,
414            Self::NullOr { .. } => false,
415            _ => true,
416        }
417    }
418
419    fn inner(&self, context: &ComputationContext) -> Result<&Type, String> {
420        match self {
421            Self::Releasable { id, inner } => {
422                if context.resources.contains(id) {
423                    Ok(&inner)
424                } else {
425                    Err("Access to released resource".to_string())
426                }
427            }
428            _ => Ok(self),
429        }
430    }
431
432    /// Constraints `type1` and `type2` for a conditional jump with the specified `jump_type` and
433    /// `jump_width`.
434    fn constraint(
435        context: &mut ComputationContext,
436        jump_type: JumpType,
437        jump_width: JumpWidth,
438        type1: Self,
439        type2: Self,
440    ) -> Result<(Self, Self), String> {
441        let result = match (jump_width, jump_type, type1.inner(context)?, type2.inner(context)?) {
442            (JumpWidth::W64, JumpType::Eq, Type::ScalarValue(data1), Type::ScalarValue(data2))
443                if data1.is_fully_initialized() && data2.is_fully_initialized() =>
444            {
445                let umin = std::cmp::max(data1.min(), data2.min());
446                let umax = std::cmp::min(data1.max(), data2.max());
447                let v = Type::ScalarValue(ScalarValueData::new(
448                    data1.value | data2.value,
449                    data1.unknown_mask & data2.unknown_mask,
450                    0,
451                    U64Range::new(umin, umax),
452                ));
453                (v.clone(), v)
454            }
455            (JumpWidth::W32, JumpType::Eq, Type::ScalarValue(data1), Type::ScalarValue(data2))
456                if data1.is_fully_initialized() && data2.is_fully_initialized() =>
457            {
458                let maybe_umin = if data1.min() <= U32_MAX && data2.min() <= U32_MAX {
459                    Some(std::cmp::max(data1.min(), data2.min()))
460                } else {
461                    None
462                };
463                let maybe_umax = if data1.max() <= U32_MAX && data2.max() <= U32_MAX {
464                    Some(std::cmp::min(data1.max(), data2.max()))
465                } else {
466                    None
467                };
468
469                let urange1 = U64Range::new(
470                    maybe_umin.unwrap_or_else(|| data1.min()),
471                    maybe_umax.unwrap_or_else(|| data1.max()),
472                );
473                let v1 = Type::ScalarValue(ScalarValueData::new(
474                    data1.value | (data2.value & U32_MAX),
475                    data1.unknown_mask & (data2.unknown_mask | (U32_MAX << 32)),
476                    0,
477                    urange1,
478                ));
479
480                let urange2 = U64Range::new(
481                    maybe_umin.unwrap_or_else(|| data2.min()),
482                    maybe_umax.unwrap_or_else(|| data2.max()),
483                );
484                let v2 = Type::ScalarValue(ScalarValueData::new(
485                    data2.value | (data1.value & U32_MAX),
486                    data2.unknown_mask & (data1.unknown_mask | (U32_MAX << 32)),
487                    0,
488                    urange2,
489                ));
490                (v1, v2)
491            }
492            (JumpWidth::W64, JumpType::Eq, Type::ScalarValue(data), Type::NullOr { id, .. })
493            | (JumpWidth::W64, JumpType::Eq, Type::NullOr { id, .. }, Type::ScalarValue(data))
494                if data.is_zero() =>
495            {
496                context.set_null(id, true);
497                let zero = Type::from(0);
498                (zero.clone(), zero)
499            }
500            (JumpWidth::W64, jump_type, Type::NullOr { id, inner }, Type::ScalarValue(data))
501                if jump_type.is_strict() && data.is_zero() =>
502            {
503                context.set_null(id, false);
504                let inner = *inner.clone();
505                inner.register_resource(context);
506                (inner, type2)
507            }
508            (JumpWidth::W64, jump_type, Type::ScalarValue(data), Type::NullOr { id, inner })
509                if jump_type.is_strict() && data.is_zero() =>
510            {
511                context.set_null(id, false);
512                let inner = *inner.clone();
513                inner.register_resource(context);
514                (type1, inner)
515            }
516
517            (JumpWidth::W64, JumpType::Lt, Type::ScalarValue(data1), Type::ScalarValue(data2))
518            | (JumpWidth::W64, JumpType::Gt, Type::ScalarValue(data2), Type::ScalarValue(data1)) => {
519                debug_assert!(data1.min() < u64::MAX);
520                debug_assert!(data2.max() > 0);
521                let new_max1 = std::cmp::min(data1.max(), data2.max() - 1);
522                debug_assert!(data1.min() <= new_max1);
523                let new_min2 = std::cmp::max(data2.min(), data1.min() + 1);
524                debug_assert!(new_min2 <= data2.max());
525                let new_range1 = U64Range::new(data1.min(), new_max1);
526                let new_range2 = U64Range::new(new_min2, data2.max());
527                (data1.update_range(new_range1).into(), data2.update_range(new_range2).into())
528            }
529
530            (JumpWidth::W64, JumpType::Le, Type::ScalarValue(data1), Type::ScalarValue(data2))
531            | (JumpWidth::W64, JumpType::Ge, Type::ScalarValue(data2), Type::ScalarValue(data1)) => {
532                let new_max1 = std::cmp::min(data1.max(), data2.max());
533                debug_assert!(data1.min() <= new_max1);
534                let new_min2 = std::cmp::max(data2.min(), data1.min());
535                debug_assert!(new_min2 <= data2.max());
536                let new_range1 = U64Range::new(data1.min(), new_max1);
537                let new_range2 = U64Range::new(new_min2, data2.max());
538                (data1.update_range(new_range1).into(), data2.update_range(new_range2).into())
539            }
540
541            (
542                JumpWidth::W64,
543                JumpType::Eq,
544                Type::PtrToArray { id: id1, offset },
545                Type::PtrToEndArray { id: id2 },
546            )
547            | (
548                JumpWidth::W64,
549                JumpType::Le,
550                Type::PtrToArray { id: id1, offset },
551                Type::PtrToEndArray { id: id2 },
552            )
553            | (
554                JumpWidth::W64,
555                JumpType::Ge,
556                Type::PtrToEndArray { id: id1 },
557                Type::PtrToArray { id: id2, offset },
558            ) if id1 == id2 => {
559                context.update_array_bounds(id1.clone(), *offset);
560                (type1, type2)
561            }
562            (
563                JumpWidth::W64,
564                JumpType::Lt,
565                Type::PtrToArray { id: id1, offset },
566                Type::PtrToEndArray { id: id2 },
567            )
568            | (
569                JumpWidth::W64,
570                JumpType::Gt,
571                Type::PtrToEndArray { id: id1 },
572                Type::PtrToArray { id: id2, offset },
573            ) if id1 == id2 => {
574                context.update_array_bounds(id1.clone(), *offset + 1);
575                (type1, type2)
576            }
577            (JumpWidth::W64, JumpType::Eq, _, _) => (type1.clone(), type1),
578            _ => (type1, type2),
579        };
580        Ok(result)
581    }
582
583    fn match_parameter_type(
584        &self,
585        context: &ComputationContext,
586        parameter_type: &Type,
587        index: usize,
588        next: &mut ComputationContext,
589    ) -> Result<(), String> {
590        match (parameter_type, self) {
591            (Type::NullOrParameter(t), Type::ScalarValue(data))
592                if data.is_known() && data.value == 0 =>
593            {
594                Ok(())
595            }
596            (Type::NullOrParameter(t), _) => self.match_parameter_type(context, t, index, next),
597            (Type::ScalarValueParameter, Type::ScalarValue(data))
598                if data.is_fully_initialized() =>
599            {
600                Ok(())
601            }
602            (Type::ConstPtrToMapParameter, Type::ConstPtrToMap { .. }) => Ok(()),
603            (
604                Type::MapKeyParameter { map_ptr_index },
605                Type::PtrToMemory { offset, buffer_size, .. },
606            ) => {
607                let schema = context.get_map_schema(*map_ptr_index)?;
608                context.check_memory_access(*offset, *buffer_size, 0, schema.key_size as usize)
609            }
610            (Type::MapKeyParameter { map_ptr_index }, Type::PtrToStack { offset }) => {
611                let schema = context.get_map_schema(*map_ptr_index)?;
612                context.stack.read_data_ptr(context.pc, *offset, schema.key_size as u64)
613            }
614            (
615                Type::MapValueParameter { map_ptr_index },
616                Type::PtrToMemory { offset, buffer_size, .. },
617            ) => {
618                let schema = context.get_map_schema(*map_ptr_index)?;
619                context.check_memory_access(*offset, *buffer_size, 0, schema.value_size as usize)
620            }
621            (Type::MapValueParameter { map_ptr_index }, Type::PtrToStack { offset }) => {
622                let schema = context.get_map_schema(*map_ptr_index)?;
623                context.stack.read_data_ptr(context.pc, *offset, schema.value_size as u64)
624            }
625            (Type::MemoryParameter { size, .. }, Type::PtrToMemory { offset, buffer_size, .. }) => {
626                let expected_size = size.size(context)?;
627                let size_left = ScalarValueData::from(*buffer_size) - offset;
628                if expected_size > size_left.min() {
629                    return Err("out of bound read".to_string());
630                }
631                Ok(())
632            }
633
634            (Type::MemoryParameter { size, input, output }, Type::PtrToStack { offset }) => {
635                let size = size.size(context)?;
636                let buffer_end = offset.add(size);
637                if !buffer_end.is_valid() {
638                    Err("out of bound access".to_string())
639                } else {
640                    if *output {
641                        next.stack.write_data_ptr(context.pc, *offset, size)?;
642                    }
643                    if *input {
644                        context.stack.read_data_ptr(context.pc, *offset, size)?;
645                    }
646                    Ok(())
647                }
648            }
649            (
650                Type::StructParameter { id: id1 },
651                Type::PtrToMemory { id: id2, offset, .. }
652                | Type::PtrToStruct { id: id2, offset, .. },
653            ) if offset.is_zero() && id1.matches(id2) => Ok(()),
654            (
655                Type::ReleasableParameter { id: id1, inner: inner1 },
656                Type::Releasable { id: id2, inner: inner2 },
657            ) if id2.has_parent(id1) => {
658                if next.resources.contains(id2) {
659                    inner2.match_parameter_type(context, inner1, index, next)
660                } else {
661                    Err(format!("Resource already released for index {index}"))
662                }
663            }
664            (Type::ReleaseParameter { id: id1 }, Type::Releasable { id: id2, .. })
665                if id2.has_parent(id1) =>
666            {
667                if next.resources.remove(id2) {
668                    Ok(())
669                } else {
670                    Err(format!("{id2:?} Resource already released for index {index}"))
671                }
672            }
673            (_, Type::Releasable { inner, .. }) => {
674                inner.match_parameter_type(context, parameter_type, index, next)
675            }
676            (Type::AnyParameter, _) => Ok(()),
677
678            _ => Err(format!("incorrect parameter for index {index}")),
679        }
680    }
681
682    /// If this `Type` is an instance of NullOr with the given `null_id`, replace it wither either
683    /// 0 or the subtype depending on `is_null`
684    fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
685        match self {
686            Type::NullOr { id, inner } if id == null_id => {
687                if is_null {
688                    *self = Type::from(0);
689                } else {
690                    *self = *inner.clone();
691                }
692            }
693            _ => {}
694        }
695    }
696
697    fn register_resource(&self, context: &mut ComputationContext) {
698        match self {
699            Type::Releasable { id, .. } => {
700                context.resources.insert(id.clone());
701            }
702            _ => {}
703        }
704    }
705
706    /// Partially Compares two iterators of comparable items.
707    ///
708    /// This function iterates through both input iterators simultaneously and compares the corresponding elements.
709    /// The comparison continues until:
710    /// 1. Both iterators are exhausted and all elements were considered equal, in which case it returns `Some(Ordering::Equal)`.
711    /// 2. All pairs of corresponding elements that are not equal have the same ordering (`Ordering::Less` or `Ordering::Greater`), in which case it returns `Some(Ordering)` reflecting that consistent ordering.
712    /// 3. One iterator is exhausted before the other, or any comparison between elements yields `None`, or not all non-equal pairs have the same ordering, in which case it returns `None`.
713    fn compare_list<'a>(
714        mut l1: impl Iterator<Item = &'a Self>,
715        mut l2: impl Iterator<Item = &'a Self>,
716    ) -> Option<Ordering> {
717        let mut result = Ordering::Equal;
718        loop {
719            match (l1.next(), l2.next()) {
720                (None, None) => return Some(result),
721                (_, None) | (None, _) => return None,
722                (Some(v1), Some(v2)) => {
723                    result = associate_orderings(result, v1.partial_cmp(v2)?)?;
724                }
725            }
726        }
727    }
728}
729
/// The signature of an external (helper) function registered with the verifier.
#[derive(Clone, Debug)]
pub struct FunctionSignature {
    /// Expected types of the arguments.
    pub args: Vec<Type>,
    /// Type of the returned value.
    pub return_value: Type,
    /// Whether calling the function invalidates array bounds previously proven by the verifier.
    pub invalidate_array_bounds: bool,
}
736
/// The environment a program is verified against: available maps, registered helper
/// functions, and the types of the program arguments.
#[derive(Debug, Default)]
pub struct CallingContext {
    /// List of map schemas of the associated map. The maps can be accessed using LDDW instruction
    /// with `src_reg=BPF_PSEUDO_MAP_IDX`.
    pub maps: Vec<MapSchema>,
    /// The registered external functions.
    pub helpers: HashMap<u32, FunctionSignature>,
    /// The args of the program.
    pub args: Vec<Type>,
    /// Packet type. Normally it should be either `None` or `args[0]`.
    pub packet_type: Option<Type>,
}
749
750impl CallingContext {
751    pub fn register_map(&mut self, schema: MapSchema) -> usize {
752        let index = self.maps.len();
753        self.maps.push(schema);
754        index
755    }
756    pub fn set_helpers(&mut self, helpers: HashMap<u32, FunctionSignature>) {
757        self.helpers = helpers;
758    }
759    pub fn set_args(&mut self, args: &[Type]) {
760        debug_assert!(args.len() <= 5);
761        self.args = args.to_vec();
762    }
763    pub fn set_packet_type(&mut self, packet_type: Type) {
764        self.packet_type = Some(packet_type);
765    }
766}
767
/// Records an instruction that accesses a struct field, so that the instruction can be
/// rewritten when the program is linked.
#[derive(Debug, PartialEq, Clone)]
pub struct StructAccess {
    /// Program counter of the load/store instruction.
    pub pc: ProgramCounter,

    /// Memory Id of the struct being accessed.
    pub memory_id: MemoryId,

    /// Offset of the field being loaded.
    pub field_offset: usize,

    /// Indicates that this is a 32-bit pointer load. These instructions must be remapped.
    pub is_32_bit_ptr_load: bool,
}
781
/// An ebpf program that passed verification, together with the metadata required to link and
/// execute it.
#[derive(Debug, Clone)]
pub struct VerifiedEbpfProgram {
    /// The verified instructions.
    pub(crate) code: Vec<EbpfInstruction>,
    /// The types of the program arguments (passed in r1 to r5).
    pub(crate) args: Vec<Type>,
    /// Instructions accessing struct fields; they must be remapped when the program is linked.
    pub(crate) struct_access_instructions: Vec<StructAccess>,
    /// The schemas of the maps referenced by the program.
    pub(crate) maps: Vec<MapSchema>,
}
789
impl VerifiedEbpfProgram {
    /// Convert the program to raw code. Can be used only when the program doesn't access any
    /// structs and maps.
    pub fn to_code(self) -> Vec<EbpfInstruction> {
        debug_assert!(self.struct_access_instructions.is_empty());
        debug_assert!(self.maps.is_empty());
        self.code
    }

    /// The verified instructions.
    pub fn code(&self) -> &[EbpfInstruction] {
        &self.code
    }

    /// The instructions that access struct fields and must be remapped when linking.
    pub fn struct_access_instructions(&self) -> &[StructAccess] {
        &self.struct_access_instructions
    }

    /// Reassembles a `VerifiedEbpfProgram` from parts that were previously verified. No
    /// verification is performed here.
    pub fn from_verified_code(
        code: Vec<EbpfInstruction>,
        args: Vec<Type>,
        struct_access_instructions: Vec<StructAccess>,
        maps: Vec<MapSchema>,
    ) -> Self {
        Self { code, args, struct_access_instructions, maps }
    }

    /// The schemas of the maps used by the program.
    pub fn maps(&self) -> &[MapSchema] {
        &self.maps
    }
}
820
821/// Verify the given code depending on the type of the parameters and the registered external
822/// functions. Returned `VerifiedEbpfProgram` should be linked in order to execute it.
823pub fn verify_program(
824    code: Vec<EbpfInstruction>,
825    calling_context: CallingContext,
826    logger: &mut dyn VerifierLogger,
827) -> Result<VerifiedEbpfProgram, EbpfError> {
828    if code.len() > BPF_MAX_INSTS {
829        return error_and_log(logger, "ebpf program too long");
830    }
831
832    let mut context = ComputationContext::default();
833    for (i, t) in calling_context.args.iter().enumerate() {
834        // The parameter registers are r1 to r5.
835        context.set_reg((i + 1) as u8, t.clone()).map_err(EbpfError::ProgramVerifyError)?;
836    }
837    let states = vec![context];
838    let mut verification_context = VerificationContext {
839        calling_context,
840        logger,
841        states,
842        code: &code,
843        counter: 0,
844        iteration: 0,
845        terminating_contexts: Default::default(),
846        struct_access_instructions: Default::default(),
847    };
848    while let Some(mut context) = verification_context.states.pop() {
849        if let Some(terminating_contexts) =
850            verification_context.terminating_contexts.get(&context.pc)
851        {
852            // Check whether there exist a context that terminate and prove that this context does
853            // also terminate.
854            if let Some(ending_context) =
855                terminating_contexts.iter().find(|c| c.computation_context >= context)
856            {
857                // One such context has been found, this proves the current context terminates.
858                // If the context has a parent, register the data dependencies and try to terminate
859                // it.
860                if let Some(parent) = context.parent.take() {
861                    parent.dependencies.lock().push(ending_context.dependencies.clone());
862                    if let Some(parent) = Arc::into_inner(parent) {
863                        parent
864                            .terminate(&mut verification_context)
865                            .map_err(EbpfError::ProgramVerifyError)?;
866                    }
867                }
868                continue;
869            }
870        }
871        if verification_context.iteration > 10 * BPF_MAX_INSTS {
872            return error_and_log(verification_context.logger, "bpf byte code does not terminate");
873        }
874        if context.pc >= code.len() {
875            return error_and_log(verification_context.logger, "pc out of bounds");
876        }
877        let visit_result = context.visit(&mut verification_context, code[context.pc]);
878        match visit_result {
879            Err(message) => {
880                let message = format!("at PC {}: {}", context.pc, message);
881                return error_and_log(verification_context.logger, message);
882            }
883            _ => {}
884        }
885        verification_context.iteration += 1;
886    }
887
888    let struct_access_instructions =
889        verification_context.struct_access_instructions.into_values().collect::<Vec<_>>();
890    let CallingContext { maps, args, .. } = verification_context.calling_context;
891    Ok(VerifiedEbpfProgram { code, struct_access_instructions, maps, args })
892}
893
/// The mutable state shared across the whole verification: pending states, logging, and the
/// data collected for linking.
struct VerificationContext<'a> {
    /// The type information for the program arguments and the registered functions.
    calling_context: CallingContext,
    /// The logger to use.
    logger: &'a mut dyn VerifierLogger,
    /// The `ComputationContext` yet to be validated.
    states: Vec<ComputationContext>,
    /// The program being analyzed.
    code: &'a [EbpfInstruction],
    /// A counter used to generate unique ids for memory buffers and maps.
    counter: u64,
    /// The current iteration of the verifier. Used to ensure termination by limiting the number of
    /// iterations before bailing out.
    iteration: usize,
    /// Keep track of the contexts that terminate at a given pc. The contexts in each list are
    /// pairwise incomparable, as each time a bigger context is computed, the smaller ones are
    /// removed from the list.
    terminating_contexts: BTreeMap<ProgramCounter, Vec<TerminatingContext>>,
    /// The current set of struct access instructions that will need to be updated when the
    /// program is linked. This is also used to ensure that a given instruction always loads the
    /// same field. If this is not the case, the verifier will reject the program.
    struct_access_instructions: HashMap<ProgramCounter, StructAccess>,
}
917
impl<'a> VerificationContext<'a> {
    /// Returns a fresh unique id, used for memory buffers and maps.
    fn next_id(&mut self) -> u64 {
        let id = self.counter;
        self.counter += 1;
        id
    }

    /// Register an instruction that loads or stores a struct field. These instructions will need
    /// to be updated later when the program is linked.
    ///
    /// A given pc must always access the same field: registering a different access for an
    /// already-registered pc is rejected.
    fn register_struct_access(&mut self, struct_access: StructAccess) -> Result<(), String> {
        match self.struct_access_instructions.entry(struct_access.pc) {
            std::collections::hash_map::Entry::Vacant(entry) => {
                entry.insert(struct_access);
            }
            std::collections::hash_map::Entry::Occupied(entry) => {
                if *entry.get() != struct_access {
                    return Err("Inconsistent struct field access".to_string());
                }
            }
        }
        Ok(())
    }
}
941
/// Size in bytes of one stack slot; the stack is modeled as an array of aligned u64 slots.
const STACK_ELEMENT_SIZE: usize = std::mem::size_of::<u64>();
/// Number of u64 slots in the ebpf stack.
const STACK_MAX_INDEX: usize = BPF_STACK_SIZE / STACK_ELEMENT_SIZE;
944
/// An offset inside the stack, counted in bytes. The stack grows downward; offset
/// `BPF_STACK_SIZE` corresponds to the frame pointer (one past the end of the stack buffer)
/// and smaller offsets point into the stack.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct StackOffset(ScalarValueData);

/// The default offset is the frame pointer itself (`BPF_STACK_SIZE`).
impl Default for StackOffset {
    fn default() -> Self {
        Self(BPF_STACK_SIZE.into())
    }
}
955
impl StackOffset {
    /// Whether the current offset is valid: statically known and at most `BPF_STACK_SIZE`
    /// (equality is allowed, since the frame pointer points one past the end of the stack).
    fn is_valid(&self) -> bool {
        self.0.is_known() && self.0.value <= (BPF_STACK_SIZE as u64)
    }

    /// The scalar value backing this offset.
    fn reg(&self) -> ScalarValueData {
        self.0
    }

    /// The index into the stack array this offset points to. Can be called only if `is_valid()`
    /// is true.
    fn array_index(&self) -> usize {
        debug_assert!(self.is_valid());
        usize::try_from(self.0.value).unwrap() / STACK_ELEMENT_SIZE
    }

    /// The offset inside the aligned u64 in the stack.
    fn sub_index(&self) -> usize {
        debug_assert!(self.is_valid());
        usize::try_from(self.0.value).unwrap() % STACK_ELEMENT_SIZE
    }

    /// Returns this offset advanced by `rhs` bytes. The result may be invalid and must be
    /// re-checked with `is_valid()`.
    fn add<T: Into<ScalarValueData>>(self, rhs: T) -> Self {
        Self(self.0 + rhs)
    }
}
984
/// The state of the stack, modeled as a sparse map from slot index (one slot per aligned u64)
/// to the type stored in that slot. Slots absent from `data` are uninitialized.
#[derive(Clone, Debug, Default, PartialEq)]
struct Stack {
    data: HashMap<usize, Type>,
}
990
991impl Stack {
992    /// Replace all instances of the NullOr type with the given `null_id` to either 0 or the
993    /// subtype depending on `is_null`
994    fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
995        for (_, t) in self.data.iter_mut() {
996            t.set_null(null_id, is_null);
997        }
998    }
999
1000    fn get(&self, index: usize) -> &Type {
1001        self.data.get(&index).unwrap_or(&Type::UNINITIALIZED)
1002    }
1003
1004    fn set(&mut self, index: usize, t: Type) {
1005        if t == Type::UNINITIALIZED {
1006            self.data.remove(&index);
1007        } else {
1008            self.data.insert(index, t);
1009        }
1010    }
1011
1012    fn extract_sub_value(value: u64, offset: usize, byte_count: usize) -> u64 {
1013        NativeEndian::read_uint(&value.as_bytes()[offset..], byte_count)
1014    }
1015
1016    fn insert_sub_value(mut original: u64, value: u64, width: DataWidth, offset: usize) -> u64 {
1017        let byte_count = width.bytes();
1018        let original_buf = original.as_mut_bytes();
1019        let value_buf = value.as_bytes();
1020        original_buf[offset..(byte_count + offset)].copy_from_slice(&value_buf[..byte_count]);
1021        original
1022    }
1023
1024    fn write_data_ptr(
1025        &mut self,
1026        pc: ProgramCounter,
1027        mut offset: StackOffset,
1028        bytes: u64,
1029    ) -> Result<(), String> {
1030        for i in 0..bytes {
1031            self.store(offset, Type::UNKNOWN_SCALAR, DataWidth::U8)?;
1032            offset = offset.add(1);
1033        }
1034        Ok(())
1035    }
1036
1037    fn read_data_ptr(
1038        &self,
1039        pc: ProgramCounter,
1040        offset: StackOffset,
1041        bytes: u64,
1042    ) -> Result<(), String> {
1043        let read_element =
1044            |index: usize, start_offset: usize, end_offset: usize| -> Result<(), String> {
1045                match self.get(index) {
1046                    Type::ScalarValue(data) => {
1047                        debug_assert!(end_offset > start_offset);
1048                        let unwritten_bits = Self::extract_sub_value(
1049                            data.unwritten_mask,
1050                            start_offset,
1051                            end_offset - start_offset,
1052                        );
1053                        if unwritten_bits == 0 {
1054                            Ok(())
1055                        } else {
1056                            Err("reading unwritten value from the stack".to_string())
1057                        }
1058                    }
1059                    _ => Err("invalid read from the stack".to_string()),
1060                }
1061            };
1062        if bytes == 0 {
1063            return Ok(());
1064        }
1065
1066        if bytes as usize > BPF_STACK_SIZE {
1067            return Err("stack overflow".to_string());
1068        }
1069
1070        if !offset.is_valid() {
1071            return Err("invalid stack offset".to_string());
1072        }
1073
1074        let end_offset = offset.add(bytes);
1075        if !end_offset.is_valid() {
1076            return Err("stack overflow".to_string())?;
1077        }
1078
1079        // Handle the case where all the data is contained in a single element excluding the last
1080        // byte (the case when the read ends at an element edge, i.e. `end_offset.sub_index()==0`,
1081        // is covered by the default path below).
1082        if offset.array_index() == end_offset.array_index() {
1083            return read_element(offset.array_index(), offset.sub_index(), end_offset.sub_index());
1084        }
1085
1086        // Handle the first element, that might be partial.
1087        read_element(offset.array_index(), offset.sub_index(), STACK_ELEMENT_SIZE)?;
1088
1089        // Handle the last element, that might be partial.
1090        if end_offset.sub_index() != 0 {
1091            read_element(end_offset.array_index(), 0, end_offset.sub_index())?;
1092        }
1093
1094        // Handle the any full type between beginning and end.
1095        for i in (offset.array_index() + 1)..end_offset.array_index() {
1096            read_element(i, 0, STACK_ELEMENT_SIZE)?;
1097        }
1098
1099        Ok(())
1100    }
1101
1102    fn store(&mut self, offset: StackOffset, value: Type, width: DataWidth) -> Result<(), String> {
1103        if !offset.is_valid() {
1104            return Err("out of bounds store".to_string());
1105        }
1106        if offset.sub_index() % width.bytes() != 0 {
1107            return Err("misaligned access".to_string());
1108        }
1109
1110        let index = offset.array_index();
1111        if width == DataWidth::U64 {
1112            self.set(index, value);
1113        } else {
1114            match value {
1115                Type::ScalarValue(data) => {
1116                    let old_data = match self.get(index) {
1117                        Type::ScalarValue(data) => *data,
1118                        _ => {
1119                            // The value in the stack is not a scalar. Let consider it an scalar
1120                            // value with no written bits.
1121                            ScalarValueData::UNINITIALIZED
1122                        }
1123                    };
1124                    let sub_index = offset.sub_index();
1125                    let value =
1126                        Self::insert_sub_value(old_data.value, data.value, width, sub_index);
1127                    let unknown_mask = Self::insert_sub_value(
1128                        old_data.unknown_mask,
1129                        data.unknown_mask,
1130                        width,
1131                        sub_index,
1132                    );
1133                    let unwritten_mask = Self::insert_sub_value(
1134                        old_data.unwritten_mask,
1135                        data.unwritten_mask,
1136                        width,
1137                        sub_index,
1138                    );
1139                    let urange = U64Range::compute_range_for_bytes_swap(
1140                        old_data.urange,
1141                        data.urange,
1142                        sub_index,
1143                        0,
1144                        width.bytes(),
1145                    );
1146                    self.set(
1147                        index,
1148                        Type::ScalarValue(ScalarValueData::new(
1149                            value,
1150                            unknown_mask,
1151                            unwritten_mask,
1152                            urange,
1153                        )),
1154                    );
1155                }
1156                _ => {
1157                    return Err("cannot store part of a non scalar value on the stack".to_string());
1158                }
1159            }
1160        }
1161        Ok(())
1162    }
1163
1164    fn load(&self, offset: StackOffset, width: DataWidth) -> Result<Type, String> {
1165        if !offset.is_valid() {
1166            return Err("out of bounds load".to_string());
1167        }
1168        if offset.array_index() >= STACK_MAX_INDEX {
1169            return Err("out of bounds load".to_string());
1170        }
1171        if offset.sub_index() % width.bytes() != 0 {
1172            return Err("misaligned access".to_string());
1173        }
1174
1175        let index = offset.array_index();
1176        let loaded_type = self.get(index).clone();
1177        let result = if width == DataWidth::U64 {
1178            loaded_type
1179        } else {
1180            match loaded_type {
1181                Type::ScalarValue(data) => {
1182                    let sub_index = offset.sub_index();
1183                    let value = Self::extract_sub_value(data.value, sub_index, width.bytes());
1184                    let unknown_mask =
1185                        Self::extract_sub_value(data.unknown_mask, sub_index, width.bytes());
1186                    let unwritten_mask =
1187                        Self::extract_sub_value(data.unwritten_mask, sub_index, width.bytes());
1188                    let urange = U64Range::compute_range_for_bytes_swap(
1189                        0.into(),
1190                        data.urange,
1191                        0,
1192                        sub_index,
1193                        width.bytes(),
1194                    );
1195                    Type::ScalarValue(ScalarValueData::new(
1196                        value,
1197                        unknown_mask,
1198                        unwritten_mask,
1199                        urange,
1200                    ))
1201                }
1202                _ => return Err(format!("incorrect load of {} bytes", width.bytes())),
1203            }
1204        };
1205        if !result.is_initialized() {
1206            return Err("reading unwritten value from the stack".to_string());
1207        }
1208        Ok(result)
1209    }
1210}
1211
/// Two types are ordered with `t1` > `t2` if a proof that a program in a state `t1` finish is also
/// a proof that a program in a state `t2` finish.
impl PartialOrd for Stack {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        let mut result = Ordering::Equal;
        let mut data_iter1 = self.data.iter().peekable();
        let mut data_iter2 = other.data.iter().peekable();
        // Merge over the key sets of both maps and compare the slot types key by key; a key
        // missing on one side compares as `UNINITIALIZED` through `get`. `HashMap` iteration
        // order is arbitrary, but each key drawn from an iterator is compared (via the
        // order-independent `get`) at the moment it is consumed, so every key present in either
        // map is compared at least once; a key may be compared more than once, which folds the
        // same ordering into `result` again — harmless as long as `associate_orderings` treats
        // repeated identical orderings consistently.
        loop {
            let k1 = data_iter1.peek().map(|(k, _)| *k);
            let k2 = data_iter2.peek().map(|(k, _)| *k);
            let k = match (k1, k2) {
                (None, None) => return Some(result),
                (Some(k), None) => {
                    data_iter1.next();
                    *k
                }
                (None, Some(k)) => {
                    data_iter2.next();
                    *k
                }
                (Some(k1), Some(k2)) => {
                    // Advance whichever iterator(s) hold the smaller key, then compare at that
                    // key.
                    if k1 <= k2 {
                        data_iter1.next();
                    }
                    if k2 <= k1 {
                        data_iter2.next();
                    }
                    *std::cmp::min(k1, k2)
                }
            };
            result = associate_orderings(result, self.get(k).partial_cmp(other.get(k))?)?;
        }
    }
}
1246
/// Logs a formatted message through the verification context's logger, prefixed with the
/// current pc and the opcode of the instruction at that pc.
macro_rules! bpf_log {
    ($context:ident, $verification_context:ident, $($msg:tt)*) => {
        let prefix = format!("{}: ({:02x})", $context.pc, $verification_context.code[$context.pc].code());
        let suffix = format!($($msg)*);
        $verification_context.logger.log(format!("{prefix} {suffix}").as_bytes());
    }
}
1254
/// The state of the computation as known by the verifier at a given point in time.
#[derive(Debug, Default)]
struct ComputationContext {
    /// The program counter.
    pc: ProgramCounter,
    /// Register 0 to 9. r10 (the frame pointer) is synthesized on read in `reg` and is never
    /// stored here.
    registers: [Type; GENERAL_REGISTER_COUNT as usize],
    /// The state of the stack.
    stack: Stack,
    /// The dynamically known bounds of buffers indexed by their ids.
    array_bounds: BTreeMap<MemoryId, u64>,
    /// The currently allocated resources.
    resources: HashSet<MemoryId>,
    /// The previous context in the computation.
    parent: Option<Arc<ComputationContext>>,
    /// The data dependencies of this context. This is used to broaden a known ending context to
    /// help cutting computation branches.
    dependencies: Mutex<Vec<DataDependencies>>,
}
1274
/// Implemented manually so that `dependencies` is reset instead of copied: dependencies must
/// always be attached to the exact context instance they were computed for.
impl Clone for ComputationContext {
    fn clone(&self) -> Self {
        Self {
            pc: self.pc,
            registers: self.registers.clone(),
            stack: self.stack.clone(),
            array_bounds: self.array_bounds.clone(),
            resources: self.resources.clone(),
            parent: self.parent.clone(),
            // dependencies are erased as they must always be used on the same instance of the
            // context.
            dependencies: Default::default(),
        }
    }
}
1290
/// parent and dependencies are ignored for the comparison, as they do not matter for termination.
// NOTE(review): `resources` is not part of the comparison either — confirm this is intentional.
impl PartialEq for ComputationContext {
    fn eq(&self, other: &Self) -> bool {
        self.pc == other.pc
            && self.registers == other.registers
            && self.stack == other.stack
            && self.array_bounds == other.array_bounds
    }
}
1300
1301impl ComputationContext {
1302    /// Replace all instances of the NullOr type with the given `null_id` to either 0 or the
1303    /// subtype depending on `is_null`
1304    fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
1305        for i in 0..self.registers.len() {
1306            self.registers[i].set_null(null_id, is_null);
1307        }
1308        self.stack.set_null(null_id, is_null);
1309    }
1310
    /// Returns the type currently held by register `index`.
    ///
    /// r0 to r9 are read from `registers`; r10 (and any register index between
    /// `GENERAL_REGISTER_COUNT` and `REGISTER_COUNT`) always reads as a pointer to the stack at
    /// the default offset. Indices at or past `REGISTER_COUNT` are invalid.
    fn reg(&self, index: Register) -> Result<Type, String> {
        if index >= REGISTER_COUNT {
            return Err(format!("R{index} is invalid"));
        }
        if index < GENERAL_REGISTER_COUNT {
            Ok(self.registers[index as usize].clone())
        } else {
            Ok(Type::PtrToStack { offset: StackOffset::default() })
        }
    }
1321
    /// Sets register `index` to `reg_type`. Only the general-purpose registers (below
    /// `GENERAL_REGISTER_COUNT`) are writable; writing r10 or above is rejected.
    fn set_reg(&mut self, index: Register, reg_type: Type) -> Result<(), String> {
        if index >= GENERAL_REGISTER_COUNT {
            return Err(format!("R{index} is invalid"));
        }
        self.registers[index as usize] = reg_type;
        Ok(())
    }
1329
1330    fn update_array_bounds(&mut self, id: MemoryId, new_bound: ScalarValueData) {
1331        let new_bound_min = new_bound.min();
1332        self.array_bounds
1333            .entry(id)
1334            .and_modify(|v| *v = std::cmp::max(*v, new_bound_min))
1335            .or_insert(new_bound_min);
1336    }
1337
    /// Returns the schema of the map held by helper-call argument `argument` (0-based, located
    /// in register `argument + 1`), or an error if that register does not hold a map pointer.
    fn get_map_schema(&self, argument: u8) -> Result<MapSchema, String> {
        match self.reg(argument + 1)? {
            Type::ConstPtrToMap { schema, .. } => Ok(schema),
            _ => Err(format!("No map found at argument {argument}")),
        }
    }
1344
    /// Returns the context for the next instruction (pc + 1), recording a clone of the current
    /// context as its parent.
    fn next(&self) -> Result<Self, String> {
        let parent = Some(Arc::new(self.clone()));
        self.jump_with_offset(0, parent)
    }
1349
    /// Returns a new `ComputationContext` where the pc has jump by `offset + 1`. In particular,
    /// the next instruction is reached with `jump_with_offset(0)`.
    fn jump_with_offset(&self, offset: i16, parent: Option<Arc<Self>>) -> Result<Self, String> {
        // The two additions are checked separately: `pc + offset` must itself be representable
        // before the final `+ 1` is applied.
        let pc = self
            .pc
            .checked_add_signed(offset.into())
            .and_then(|v| v.checked_add_signed(1))
            .ok_or_else(|| "jump outside of program".to_string())?;
        // The new context inherits the full state; `dependencies` starts empty, as it must stay
        // tied to a single context instance.
        let result = Self {
            pc,
            registers: self.registers.clone(),
            stack: self.stack.clone(),
            array_bounds: self.array_bounds.clone(),
            resources: self.resources.clone(),
            parent,
            dependencies: Default::default(),
        };
        Ok(result)
    }
1369
    /// Checks that an access of `width` bytes at `dst_offset + instruction_offset` stays within
    /// a buffer of `dst_buffer_size` bytes, using the value range tracked for `dst_offset`.
    fn check_memory_access(
        &self,
        dst_offset: ScalarValueData,
        dst_buffer_size: u64,
        instruction_offset: i16,
        width: usize,
    ) -> Result<(), String> {
        // The maximal possible end of the accessed byte range must fit inside the buffer.
        let memory_range = dst_offset.urange + instruction_offset + U64Range::new(0, width as u64);
        if memory_range.max > dst_buffer_size {
            return Err("out of bound access".to_string());
        }
        Ok(())
    }
1383
    /// Verifies a store of `value` through the pointer `addr` at `field` (offset + width).
    ///
    /// Depending on the pointer type this checks stack bounds, memory/array bounds, or struct
    /// field validity (registering the instruction for later remapping). Except for stores to
    /// the stack, only fully initialized scalar values may be stored, so that pointers and
    /// uninitialized data are never leaked to external memory.
    fn store_memory(
        &mut self,
        context: &mut VerificationContext<'_>,
        addr: &Type,
        field: Field,
        value: Type,
    ) -> Result<(), String> {
        let addr = addr.inner(self)?;
        match *addr {
            Type::PtrToStack { offset } => {
                // The stack may hold any type (including pointers), so the scalar-only leak
                // check at the end of this function is deliberately skipped by returning early.
                let offset_sum = offset.add(field.offset);
                return self.stack.store(offset_sum, value, field.width);
            }
            Type::PtrToMemory { offset, buffer_size, .. } => {
                self.check_memory_access(offset, buffer_size, field.offset, field.width.bytes())?;
            }
            Type::PtrToStruct { ref id, offset, ref descriptor, .. } => {
                let field_desc = descriptor
                    .find_field(offset, field)
                    .ok_or_else(|| "incorrect store".to_string())?;

                // Only fields declared mutable may be written.
                if !matches!(field_desc.field_type, FieldType::MutableScalar { .. }) {
                    return Err("store to a read-only field".to_string());
                }

                context.register_struct_access(StructAccess {
                    pc: self.pc,
                    memory_id: id.clone(),
                    field_offset: field_desc.offset,
                    is_32_bit_ptr_load: false,
                })?;
            }
            Type::PtrToArray { ref id, offset } => {
                // The dynamically-tracked array bound is used as the buffer size; unknown
                // arrays default to a bound of 0, rejecting any access.
                self.check_memory_access(
                    offset,
                    *self.array_bounds.get(&id).unwrap_or(&0),
                    field.offset,
                    field.width.bytes(),
                )?;
            }
            _ => return Err("incorrect store".to_string()),
        }

        match value {
            Type::ScalarValue(data) if data.is_fully_initialized() => {}
            // Private data should not be leaked.
            _ => return Err("incorrect store".to_string()),
        }
        Ok(())
    }
1434
    /// Verifies a load through the pointer `addr` at `field` (offset + width) and returns the
    /// type of the loaded value.
    ///
    /// Loads from the stack return the stored type; loads from plain memory or arrays are
    /// bounds-checked and produce unknown scalars; loads from structs are resolved through the
    /// struct descriptor and registered so the instruction can be remapped at link time.
    fn load_memory(
        &self,
        context: &mut VerificationContext<'_>,
        addr: &Type,
        field: Field,
    ) -> Result<Type, String> {
        let addr = addr.inner(self)?;
        match *addr {
            Type::PtrToStack { offset } => {
                let offset_sum = offset.add(field.offset);
                self.stack.load(offset_sum, field.width)
            }
            Type::PtrToMemory { ref id, offset, buffer_size, .. } => {
                self.check_memory_access(offset, buffer_size, field.offset, field.width.bytes())?;
                Ok(Type::UNKNOWN_SCALAR)
            }
            Type::PtrToStruct { ref id, offset, ref descriptor, .. } => {
                let field_desc = descriptor
                    .find_field(offset, field)
                    .ok_or_else(|| "incorrect load".to_string())?;

                // Map the declared field type to the type observed by the program. Pointer
                // fields get a memory id derived from the struct's id so that distinct
                // instances of the same struct stay distinguishable.
                let (return_type, is_32_bit_ptr_load) = match &field_desc.field_type {
                    FieldType::Scalar { .. } | FieldType::MutableScalar { .. } => {
                        (Type::UNKNOWN_SCALAR, false)
                    }
                    FieldType::PtrToArray { id: array_id, is_32_bit } => (
                        Type::PtrToArray { id: array_id.prepended(id.clone()), offset: 0.into() },
                        *is_32_bit,
                    ),
                    FieldType::PtrToEndArray { id: array_id, is_32_bit } => {
                        (Type::PtrToEndArray { id: array_id.prepended(id.clone()) }, *is_32_bit)
                    }
                    FieldType::PtrToMemory { id: memory_id, buffer_size, is_32_bit } => (
                        Type::PtrToMemory {
                            id: memory_id.prepended(id.clone()),
                            offset: 0.into(),
                            buffer_size: *buffer_size as u64,
                        },
                        *is_32_bit,
                    ),
                    FieldType::NullablePtrToMemory { id: memory_id, buffer_size, is_32_bit } => {
                        let id = memory_id.prepended(id.clone());
                        (
                            Type::NullOr {
                                id: id.clone(),
                                inner: Box::new(Type::PtrToMemory {
                                    id,
                                    offset: 0.into(),
                                    buffer_size: *buffer_size as u64,
                                }),
                            },
                            *is_32_bit,
                        )
                    }
                };

                context.register_struct_access(StructAccess {
                    pc: self.pc,
                    memory_id: id.clone(),
                    field_offset: field_desc.offset,
                    is_32_bit_ptr_load,
                })?;

                Ok(return_type)
            }
            Type::PtrToArray { ref id, offset } => {
                // The dynamically-tracked array bound is used as the buffer size; unknown
                // arrays default to a bound of 0, rejecting any access.
                self.check_memory_access(
                    offset,
                    *self.array_bounds.get(&id).unwrap_or(&0),
                    field.offset,
                    field.width.bytes(),
                )?;
                Ok(Type::UNKNOWN_SCALAR)
            }
            _ => Err("incorrect load".to_string()),
        }
    }
1512
1513    /**
1514     * Given the given `return_value` in a method signature, return the concrete type to use,
1515     * updating the `next` context is needed.
1516     *
1517     * `maybe_null` is true is the computed value will be a descendant of a `NullOr` type.
1518     */
1519    fn resolve_return_value(
1520        &self,
1521        verification_context: &mut VerificationContext<'_>,
1522        return_value: &Type,
1523        next: &mut ComputationContext,
1524        maybe_null: bool,
1525    ) -> Result<Type, String> {
1526        match return_value {
1527            Type::AliasParameter { parameter_index } => self.reg(parameter_index + 1),
1528            Type::ReleasableParameter { id, inner } => {
1529                let id = MemoryId::from(verification_context.next_id()).prepended(id.clone());
1530                if !maybe_null {
1531                    next.resources.insert(id.clone());
1532                }
1533                Ok(Type::Releasable {
1534                    id,
1535                    inner: Box::new(self.resolve_return_value(
1536                        verification_context,
1537                        inner,
1538                        next,
1539                        maybe_null,
1540                    )?),
1541                })
1542            }
1543            Type::NullOrParameter(t) => {
1544                let id = verification_context.next_id();
1545                Ok(Type::NullOr {
1546                    id: id.into(),
1547                    inner: Box::new(self.resolve_return_value(
1548                        verification_context,
1549                        t,
1550                        next,
1551                        true,
1552                    )?),
1553                })
1554            }
1555            Type::MapValueParameter { map_ptr_index } => {
1556                let schema = self.get_map_schema(*map_ptr_index)?;
1557                let id = verification_context.next_id();
1558                Ok(Type::PtrToMemory {
1559                    id: id.into(),
1560                    offset: 0.into(),
1561                    buffer_size: schema.value_size as u64,
1562                })
1563            }
1564            Type::MemoryParameter { size, .. } => {
1565                let buffer_size = size.size(self)?;
1566                let id = verification_context.next_id();
1567                Ok(Type::PtrToMemory { id: id.into(), offset: 0.into(), buffer_size })
1568            }
1569            t => Ok(t.clone()),
1570        }
1571    }
1572
1573    fn compute_source(&self, src: Source) -> Result<Type, String> {
1574        match src {
1575            Source::Reg(reg) => self.reg(reg),
1576            Source::Value(v) => Ok(v.into()),
1577        }
1578    }
1579
1580    fn apply_computation(
1581        context: &ComputationContext,
1582        op1: Type,
1583        op2: Type,
1584        alu_type: AluType,
1585        op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
1586    ) -> Result<Type, String> {
1587        let result: Type = match (alu_type, op1.inner(context)?, op2.inner(context)?) {
1588            (_, Type::ScalarValue(data1), Type::ScalarValue(data2)) => op(*data1, *data2).into(),
1589            (
1590                AluType::Add,
1591                Type::ScalarValue(_),
1592                Type::PtrToStack { .. } | Type::PtrToMemory { .. } | Type::PtrToStruct { .. },
1593            ) => {
1594                return Self::apply_computation(context, op2, op1, alu_type, op);
1595            }
1596            (alu_type, Type::PtrToStack { offset: x }, Type::ScalarValue(data))
1597                if alu_type.is_ptr_compatible() =>
1598            {
1599                Type::PtrToStack { offset: run_on_stack_offset(*x, |x| op(x, *data)) }
1600            }
1601            (
1602                alu_type,
1603                Type::PtrToMemory { id, offset: x, buffer_size },
1604                Type::ScalarValue(data),
1605            ) if alu_type.is_ptr_compatible() => {
1606                let offset = op(*x, *data);
1607                Type::PtrToMemory { id: id.clone(), offset, buffer_size: *buffer_size }
1608            }
1609            (
1610                alu_type,
1611                Type::PtrToStruct { id, offset: x, descriptor },
1612                Type::ScalarValue(data),
1613            ) if alu_type.is_ptr_compatible() => {
1614                let offset = op(*x, *data);
1615                Type::PtrToStruct { id: id.clone(), offset, descriptor: descriptor.clone() }
1616            }
1617            (AluType::Add, Type::PtrToArray { id, offset: x }, Type::ScalarValue(data)) => {
1618                let offset = x.checked_add(*data).ok_or_else(|| format!("XXX"))?;
1619                Type::PtrToArray { id: id.clone(), offset }
1620            }
1621            (AluType::Sub, Type::PtrToArray { id, offset: x }, Type::ScalarValue(data)) => {
1622                let offset = x.checked_sub(*data).ok_or_else(|| format!("XXX"))?;
1623                Type::PtrToArray { id: id.clone(), offset }
1624            }
1625            (
1626                AluType::Sub,
1627                Type::PtrToMemory { id: id1, offset: x1, .. },
1628                Type::PtrToMemory { id: id2, offset: x2, .. },
1629            )
1630            | (
1631                AluType::Sub,
1632                Type::PtrToStruct { id: id1, offset: x1, .. },
1633                Type::PtrToStruct { id: id2, offset: x2, .. },
1634            )
1635            | (
1636                AluType::Sub,
1637                Type::PtrToArray { id: id1, offset: x1 },
1638                Type::PtrToArray { id: id2, offset: x2 },
1639            ) if id1 == id2 => Type::from(op(*x1, *x2)),
1640            (AluType::Sub, Type::PtrToStack { offset: x1 }, Type::PtrToStack { offset: x2 }) => {
1641                Type::from(op(x1.reg(), x2.reg()))
1642            }
1643            (
1644                AluType::Sub,
1645                Type::PtrToArray { id: id1, .. },
1646                Type::PtrToEndArray { id: id2, .. },
1647            )
1648            | (
1649                AluType::Sub,
1650                Type::PtrToEndArray { id: id1, .. },
1651                Type::PtrToArray { id: id2, .. },
1652            ) if id1 == id2 => Type::UNKNOWN_SCALAR,
1653            _ => Type::default(),
1654        };
1655        Ok(result)
1656    }
1657
1658    fn alu(
1659        &mut self,
1660        op_name: Option<&str>,
1661        verification_context: &mut VerificationContext<'_>,
1662        dst: Register,
1663        src: Source,
1664        alu_type: AluType,
1665        op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
1666    ) -> Result<(), String> {
1667        if let Some(op_name) = op_name {
1668            bpf_log!(
1669                self,
1670                verification_context,
1671                "{op_name} {}, {}",
1672                display_register(dst),
1673                display_source(src)
1674            );
1675        }
1676        let op1 = self.reg(dst)?;
1677        let op2 = self.compute_source(src)?;
1678        let result = Self::apply_computation(self, op1, op2, alu_type, op)?;
1679        let mut next = self.next()?;
1680        next.set_reg(dst, result)?;
1681        verification_context.states.push(next);
1682        Ok(())
1683    }
1684
    /// Logs an atomic instruction to the verifier log, e.g. `lock fetch add [%r1+8], %r2`.
    fn log_atomic_operation(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) {
        bpf_log!(
            self,
            verification_context,
            "lock {}{} [{}{}], {}",
            if fetch { "fetch " } else { "" },
            op_name,
            display_register(dst),
            print_offset(offset),
            display_register(src),
        );
    }
1705
1706    fn raw_atomic_operation(
1707        &mut self,
1708        op_name: &str,
1709        verification_context: &mut VerificationContext<'_>,
1710        width: DataWidth,
1711        fetch: bool,
1712        dst: Register,
1713        offset: i16,
1714        src: Register,
1715        op: impl FnOnce(&ComputationContext, Type, Type) -> Result<Type, String>,
1716    ) -> Result<(), String> {
1717        self.log_atomic_operation(op_name, verification_context, fetch, dst, offset, src);
1718        let addr = self.reg(dst)?;
1719        let value = self.reg(src)?;
1720        let field = Field::new(offset, width);
1721        let loaded_type = self.load_memory(verification_context, &addr, field)?;
1722        let result = op(self, loaded_type.clone(), value)?;
1723        let mut next = self.next()?;
1724        next.store_memory(verification_context, &addr, field, result)?;
1725        if fetch {
1726            next.set_reg(src, loaded_type)?;
1727        }
1728        verification_context.states.push(next);
1729        Ok(())
1730    }
1731
1732    fn atomic_operation(
1733        &mut self,
1734        op_name: &str,
1735        verification_context: &mut VerificationContext<'_>,
1736        width: DataWidth,
1737        fetch: bool,
1738        dst: Register,
1739        offset: i16,
1740        src: Register,
1741        alu_type: AluType,
1742        op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
1743    ) -> Result<(), String> {
1744        self.raw_atomic_operation(
1745            op_name,
1746            verification_context,
1747            width,
1748            fetch,
1749            dst,
1750            offset,
1751            src,
1752            |context: &ComputationContext, v1: Type, v2: Type| {
1753                Self::apply_computation(context, v1, v2, alu_type, op)
1754            },
1755        )
1756    }
1757
    /// Verifies an atomic compare-and-exchange instruction.
    ///
    /// The value at `[dst + offset]` is compared against `r0`; when equal, the value in
    /// `src` is stored, otherwise memory is left unchanged. In both cases `r0` receives
    /// the loaded value. Both outcomes are explored whenever the comparison cannot be
    /// decided statically.
    fn raw_atomic_cmpxchg(
        &mut self,
        op_name: &str,
        verification_context: &mut VerificationContext<'_>,
        dst: Register,
        offset: i16,
        src: Register,
        jump_width: JumpWidth,
        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
    ) -> Result<(), String> {
        self.log_atomic_operation(op_name, verification_context, true, dst, offset, src);
        let width = match jump_width {
            JumpWidth::W32 => DataWidth::U32,
            JumpWidth::W64 => DataWidth::U64,
        };
        let addr = self.reg(dst)?;
        let field = Field::new(offset, width);
        // `dst` is rebound to the value currently stored at the target address.
        let dst = self.load_memory(verification_context, &addr, field)?;
        let value = self.reg(src)?;
        let r0 = self.reg(0)?;
        let branch = self.compute_branch(jump_width, &dst, &r0, op)?;
        // r0 = dst: the exchange happens. `value` is stored, and r0 receives the loaded
        // value narrowed by the equality constraint.
        if branch.unwrap_or(true) {
            let mut next = self.next()?;
            let (dst, r0) =
                Type::constraint(&mut next, JumpType::Eq, jump_width, dst.clone(), r0.clone())?;
            next.set_reg(0, dst)?;
            next.store_memory(verification_context, &addr, field, value)?;
            verification_context.states.push(next);
        }
        // r0 != dst: no exchange. Memory keeps the loaded value (narrowed by the
        // inequality constraint) and r0 receives it as well.
        if !branch.unwrap_or(false) {
            let mut next = self.next()?;
            let (dst, r0) = Type::constraint(&mut next, JumpType::Ne, jump_width, dst, r0)?;
            next.set_reg(0, dst.clone())?;
            next.store_memory(verification_context, &addr, field, dst)?;
            verification_context.states.push(next);
        }

        Ok(())
    }
1799    fn endianness<BO: ByteOrder>(
1800        &mut self,
1801        op_name: &str,
1802        verification_context: &mut VerificationContext<'_>,
1803        dst: Register,
1804        width: DataWidth,
1805    ) -> Result<(), String> {
1806        bpf_log!(self, verification_context, "{op_name}{} {}", width.bits(), display_register(dst),);
1807        let bit_op = |value: u64| match width {
1808            DataWidth::U16 => BO::read_u16((value as u16).as_bytes()) as u64,
1809            DataWidth::U32 => BO::read_u32((value as u32).as_bytes()) as u64,
1810            DataWidth::U64 => BO::read_u64(value.as_bytes()),
1811            _ => {
1812                panic!("Unexpected bit width for endianness operation");
1813            }
1814        };
1815        let value = self.reg(dst)?;
1816        let new_value = match value {
1817            Type::ScalarValue(data) => Type::ScalarValue(ScalarValueData::new(
1818                bit_op(data.value),
1819                bit_op(data.unknown_mask),
1820                bit_op(data.unwritten_mask),
1821                U64Range::max(),
1822            )),
1823            _ => Type::default(),
1824        };
1825        let mut next = self.next()?;
1826        next.set_reg(dst, new_value)?;
1827        verification_context.states.push(next);
1828        Ok(())
1829    }
1830
    /// Statically evaluates the comparison `op` between the operand types `op1` and
    /// `op2`.
    ///
    /// Returns `Ok(Some(true/false))` when the branch outcome is known, `Ok(None)` when
    /// both outcomes are possible, and an error when the operand types may not legally
    /// be compared.
    fn compute_branch(
        &self,
        jump_width: JumpWidth,
        op1: &Type,
        op2: &Type,
        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
    ) -> Result<Option<bool>, String> {
        match (jump_width, op1, op2) {
            (_, Type::ScalarValue(data1), Type::ScalarValue(data2)) => op(*data1, *data2),
            // A maybe-null pointer compared against 0: either outcome is possible.
            (JumpWidth::W64, Type::ScalarValue(data), Type::NullOr { .. })
            | (JumpWidth::W64, Type::NullOr { .. }, Type::ScalarValue(data))
                if data.is_zero() =>
            {
                Ok(None)
            }

            // A known non-zero value compared against 0: evaluate as `1 op 0` (resp.
            // `0 op 1`) so only the relative order matters.
            (JumpWidth::W64, Type::ScalarValue(data), t) if data.is_zero() && t.is_non_zero() => {
                op(1.into(), 0.into())
            }

            (JumpWidth::W64, t, Type::ScalarValue(data)) if data.is_zero() && t.is_non_zero() => {
                op(0.into(), 1.into())
            }

            // Pointers into the same buffer compare by their offsets.
            (JumpWidth::W64, Type::PtrToStack { offset: x }, Type::PtrToStack { offset: y }) => {
                op(x.reg(), y.reg())
            }

            (
                JumpWidth::W64,
                Type::PtrToMemory { id: id1, offset: x, .. },
                Type::PtrToMemory { id: id2, offset: y, .. },
            )
            | (
                JumpWidth::W64,
                Type::PtrToStruct { id: id1, offset: x, .. },
                Type::PtrToStruct { id: id2, offset: y, .. },
            )
            | (
                JumpWidth::W64,
                Type::PtrToArray { id: id1, offset: x, .. },
                Type::PtrToArray { id: id2, offset: y, .. },
            ) if *id1 == *id2 => op(*x, *y),

            // An array pointer compared with the end-of-array pointer of the same
            // array: the outcome is statically unknown.
            (JumpWidth::W64, Type::PtrToArray { id: id1, .. }, Type::PtrToEndArray { id: id2 })
            | (JumpWidth::W64, Type::PtrToEndArray { id: id1 }, Type::PtrToArray { id: id2, .. })
                if *id1 == *id2 =>
            {
                Ok(None)
            }

            _ => Err(()),
        }
        .map_err(|_| "non permitted comparison".to_string())
    }
1886
1887    fn conditional_jump(
1888        &mut self,
1889        op_name: &str,
1890        verification_context: &mut VerificationContext<'_>,
1891        dst: Register,
1892        src: Source,
1893        offset: i16,
1894        jump_type: JumpType,
1895        jump_width: JumpWidth,
1896        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
1897    ) -> Result<(), String> {
1898        bpf_log!(
1899            self,
1900            verification_context,
1901            "{op_name} {}, {}, {}",
1902            display_register(dst),
1903            display_source(src),
1904            if offset == 0 { format!("0") } else { print_offset(offset) },
1905        );
1906        let op1 = self.reg(dst)?;
1907        let op2 = self.compute_source(src.clone())?;
1908        let apply_constraints_and_register = |mut next: Self,
1909                                              jump_type: JumpType|
1910         -> Result<Self, String> {
1911            if jump_type != JumpType::Unknown {
1912                let (new_op1, new_op2) =
1913                    Type::constraint(&mut next, jump_type, jump_width, op1.clone(), op2.clone())?;
1914                if dst < REGISTER_COUNT {
1915                    next.set_reg(dst, new_op1)?;
1916                }
1917                match src {
1918                    Source::Reg(r) => {
1919                        next.set_reg(r, new_op2)?;
1920                    }
1921                    _ => {
1922                        // Nothing to do
1923                    }
1924                }
1925            }
1926            Ok(next)
1927        };
1928        let branch = self.compute_branch(jump_width, &op1, &op2, op)?;
1929        let parent = Some(Arc::new(self.clone()));
1930        if branch.unwrap_or(true) {
1931            // Do the jump
1932            verification_context.states.push(apply_constraints_and_register(
1933                self.jump_with_offset(offset, parent.clone())?,
1934                jump_type,
1935            )?);
1936        }
1937        if !branch.unwrap_or(false) {
1938            // Skip the jump
1939            verification_context.states.push(apply_constraints_and_register(
1940                self.jump_with_offset(0, parent)?,
1941                jump_type.invert(),
1942            )?);
1943        }
1944        Ok(())
1945    }
1946
    /// Handles the termination of a `ComputationContext`, performing branch cutting optimization.
    ///
    /// This method is called when it has been proven that the current context terminates (e.g.,
    /// reaches an `exit` instruction).
    ///
    /// The following steps are performed:
    /// 1. **Dependency Calculation:** The data dependencies of the context are computed based on
    ///    the dependencies of its terminated children and the instruction at the current PC.
    /// 2. **Context Broadening:** The context's state is broadened by clearing registers and stack
    ///    slots that are *not* in the calculated data dependencies. This optimization assumes that
    ///    data not used by the terminated branch is irrelevant for future execution paths.
    /// 3. **Termination Registration:** The broadened context is added to the set of terminating
    ///    contexts if it is not less than any existing terminating context at the same PC.
    /// 4. **Parent Termination:** If all the children of the current context have terminated,
    ///    its parent context is recursively terminated.
    fn terminate(self, verification_context: &mut VerificationContext<'_>) -> Result<(), String> {
        let mut next = Some(self);
        // Because of the potential length of the parent chain, do not use recursion.
        while let Some(mut current) = next.take() {
            // Take the parent to process it at the end and not keep it in the terminating
            // contexts.
            let parent = current.parent.take();

            // 1. Compute the dependencies of the context using the dependencies of its children
            //    and the actual operation.
            let mut dependencies = DataDependencies::default();
            for dependency in current.dependencies.get_mut().iter() {
                dependencies.merge(dependency);
            }

            // Fold in the reads performed by the instruction at the current pc.
            dependencies.visit(
                &mut DataDependenciesVisitorContext {
                    calling_context: &verification_context.calling_context,
                    computation_context: &current,
                },
                verification_context.code[current.pc],
            )?;

            // 2. Clear the state depending on the dependencies states
            for register in 0..GENERAL_REGISTER_COUNT {
                if !dependencies.registers.contains(&register) {
                    current.set_reg(register, Default::default())?;
                }
            }
            current.stack.data.retain(|k, _| dependencies.stack.contains(k));

            // 3. Add the cleared state to the set of `terminating_contexts`
            let terminating_contexts =
                verification_context.terminating_contexts.entry(current.pc).or_default();
            let mut is_dominated = false;
            // Drop every stored context strictly smaller than the current one; if any
            // stored context is greater or equal, the current one adds nothing.
            terminating_contexts.retain(|c| match c.computation_context.partial_cmp(&current) {
                Some(Ordering::Less) => false,
                Some(Ordering::Equal) | Some(Ordering::Greater) => {
                    // If the list contains a context greater or equal to the current one, it
                    // should not be added.
                    is_dominated = true;
                    true
                }
                _ => true,
            });
            if !is_dominated {
                terminating_contexts.push(TerminatingContext {
                    computation_context: current,
                    dependencies: dependencies.clone(),
                });
            }

            // 4. Register the computed dependencies in our parent, and terminate it if all
            //    dependencies has been computed.
            if let Some(parent) = parent {
                parent.dependencies.lock().push(dependencies);
                // To check whether all dependencies have been computed, rely on the fact that the Arc
                // count of the parent keep track of how many dependencies are left.
                next = Arc::into_inner(parent);
            }
        }
        Ok(())
    }
2025}
2026
2027impl Drop for ComputationContext {
2028    fn drop(&mut self) {
2029        let mut next = self.parent.take().and_then(Arc::into_inner);
2030        // Because of the potential length of the parent chain, do not use recursion.
2031        while let Some(mut current) = next {
2032            next = current.parent.take().and_then(Arc::into_inner);
2033        }
2034    }
2035}
2036
/// Two types are ordered with `t1` > `t2` if a proof that a program in a state `t1` finishes is
/// also a proof that a program in a state `t2` finishes.
impl PartialOrd for ComputationContext {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Contexts at a different program counter or holding different resources are
        // never comparable.
        if self.pc != other.pc || self.resources != other.resources {
            return None;
        }
        let mut result = self.stack.partial_cmp(&other.stack)?;
        result = associate_orderings(
            result,
            Type::compare_list(self.registers.iter(), other.registers.iter())?,
        )?;
        // Compare the `array_bounds` maps key by key. A context that records *fewer*
        // bounds (or smaller ones) is less constrained and therefore "greater"
        // (broader); hence the reversed `v2.cmp(v1)` for equal keys and the
        // `Greater`/`Less` results when one side is missing a key.
        let mut array_bound_iter1 = self.array_bounds.iter().peekable();
        let mut array_bound_iter2 = other.array_bounds.iter().peekable();
        let result = loop {
            match (array_bound_iter1.peek().cloned(), array_bound_iter2.peek().cloned()) {
                (None, None) => break result,
                (None, _) => break associate_orderings(result, Ordering::Greater)?,
                (_, None) => break associate_orderings(result, Ordering::Less)?,
                (Some((k1, v1)), Some((k2, v2))) => match k1.cmp(k2) {
                    Ordering::Equal => {
                        array_bound_iter1.next();
                        array_bound_iter2.next();
                        result = associate_orderings(result, v2.cmp(v1))?;
                    }
                    v @ Ordering::Less => {
                        array_bound_iter1.next();
                        result = associate_orderings(result, v)?;
                    }
                    v @ Ordering::Greater => {
                        array_bound_iter2.next();
                        result = associate_orderings(result, v)?;
                    }
                },
            }
        };
        Some(result)
    }
}
2076
/// Represents the read data dependencies of an eBPF program branch.
///
/// This struct tracks which registers and stack positions are *read* by the
/// instructions within a branch of the eBPF program.  This information is used
/// during branch cutting optimization to broaden terminating contexts.
///
/// The verifier assumes that data not read by a terminated branch is irrelevant
/// for future execution paths and can be safely cleared.
#[derive(Clone, Debug, Default)]
struct DataDependencies {
    /// The set of registers read by the children of a context.
    registers: HashSet<Register>,
    /// The stack positions (as stack-array indices) read by the children of a context.
    stack: HashSet<usize>,
}
2092
impl DataDependencies {
    /// Unions the dependencies of `other` into `self`.
    fn merge(&mut self, other: &DataDependencies) {
        self.registers.extend(other.registers.iter());
        self.stack.extend(other.stack.iter());
    }

    /// Updates dependencies for an ALU instruction `dst op= src`.
    fn alu(&mut self, dst: Register, src: Source) -> Result<(), String> {
        // Only do something if the dst is read, otherwise the computation doesn't matter.
        // `dst` itself is both read and written, so its membership is unchanged.
        if self.registers.contains(&dst) {
            if let Source::Reg(src) = src {
                self.registers.insert(src);
            }
        }
        Ok(())
    }

    /// Updates dependencies for a conditional jump: both operands are always read.
    fn jmp(&mut self, dst: Register, src: Source) -> Result<(), String> {
        self.registers.insert(dst);
        if let Source::Reg(src) = src {
            self.registers.insert(src);
        }
        Ok(())
    }

    /// Updates dependencies for an atomic instruction operating on `[dst + offset]`.
    ///
    /// The stored value counts as read if its result is observed (via `src` when
    /// `fetch`, via r0 when `is_cmpxchg`) or if the touched stack slot is already in
    /// the read set.
    fn atomic(
        &mut self,
        context: &ComputationContext,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
        width: DataWidth,
        is_cmpxchg: bool,
    ) -> Result<(), String> {
        let mut is_read = false;
        if is_cmpxchg && self.registers.contains(&0) {
            is_read = true;
        }
        if fetch && self.registers.contains(&src) {
            is_read = true;
        }
        // When the address is a stack slot, track the slot itself as a dependency.
        let addr = context.reg(dst)?;
        if let Type::PtrToStack { offset: stack_offset } = addr {
            let stack_offset = stack_offset.add(offset);
            if !stack_offset.is_valid() {
                return Err(format!("Invalid stack offset at {}", context.pc));
            }
            if is_read || self.stack.contains(&stack_offset.array_index()) {
                is_read = true;
                self.stack.insert(stack_offset.array_index());
            }
        }
        if is_read {
            // NOTE(review): r0 is inserted even when `is_cmpxchg` is false — this looks
            // conservative (it can only keep extra state alive); confirm it is intended.
            self.registers.insert(0);
            self.registers.insert(src);
        }
        self.registers.insert(dst);
        Ok(())
    }
}
2153
/// Context handed to the `BpfVisitor` impl of `DataDependencies`: exposes the calling
/// context (helper signatures) and the computation context at the visited instruction.
struct DataDependenciesVisitorContext<'a> {
    calling_context: &'a CallingContext,
    computation_context: &'a ComputationContext,
}
2158
2159impl BpfVisitor for DataDependencies {
2160    type Context<'a> = DataDependenciesVisitorContext<'a>;
2161
2162    fn add<'a>(
2163        &mut self,
2164        _context: &mut Self::Context<'a>,
2165        dst: Register,
2166        src: Source,
2167    ) -> Result<(), String> {
2168        self.alu(dst, src)
2169    }
2170    fn add64<'a>(
2171        &mut self,
2172        _context: &mut Self::Context<'a>,
2173        dst: Register,
2174        src: Source,
2175    ) -> Result<(), String> {
2176        self.alu(dst, src)
2177    }
2178    fn and<'a>(
2179        &mut self,
2180        _context: &mut Self::Context<'a>,
2181        dst: Register,
2182        src: Source,
2183    ) -> Result<(), String> {
2184        self.alu(dst, src)
2185    }
2186    fn and64<'a>(
2187        &mut self,
2188        _context: &mut Self::Context<'a>,
2189        dst: Register,
2190        src: Source,
2191    ) -> Result<(), String> {
2192        self.alu(dst, src)
2193    }
2194    fn arsh<'a>(
2195        &mut self,
2196        _context: &mut Self::Context<'a>,
2197        dst: Register,
2198        src: Source,
2199    ) -> Result<(), String> {
2200        self.alu(dst, src)
2201    }
2202    fn arsh64<'a>(
2203        &mut self,
2204        _context: &mut Self::Context<'a>,
2205        dst: Register,
2206        src: Source,
2207    ) -> Result<(), String> {
2208        self.alu(dst, src)
2209    }
2210    fn div<'a>(
2211        &mut self,
2212        _context: &mut Self::Context<'a>,
2213        dst: Register,
2214        src: Source,
2215    ) -> Result<(), String> {
2216        self.alu(dst, src)
2217    }
2218    fn div64<'a>(
2219        &mut self,
2220        _context: &mut Self::Context<'a>,
2221        dst: Register,
2222        src: Source,
2223    ) -> Result<(), String> {
2224        self.alu(dst, src)
2225    }
2226    fn lsh<'a>(
2227        &mut self,
2228        _context: &mut Self::Context<'a>,
2229        dst: Register,
2230        src: Source,
2231    ) -> Result<(), String> {
2232        self.alu(dst, src)
2233    }
2234    fn lsh64<'a>(
2235        &mut self,
2236        _context: &mut Self::Context<'a>,
2237        dst: Register,
2238        src: Source,
2239    ) -> Result<(), String> {
2240        self.alu(dst, src)
2241    }
2242    fn r#mod<'a>(
2243        &mut self,
2244        _context: &mut Self::Context<'a>,
2245        dst: Register,
2246        src: Source,
2247    ) -> Result<(), String> {
2248        self.alu(dst, src)
2249    }
2250    fn mod64<'a>(
2251        &mut self,
2252        _context: &mut Self::Context<'a>,
2253        dst: Register,
2254        src: Source,
2255    ) -> Result<(), String> {
2256        self.alu(dst, src)
2257    }
2258    fn mul<'a>(
2259        &mut self,
2260        _context: &mut Self::Context<'a>,
2261        dst: Register,
2262        src: Source,
2263    ) -> Result<(), String> {
2264        self.alu(dst, src)
2265    }
2266    fn mul64<'a>(
2267        &mut self,
2268        _context: &mut Self::Context<'a>,
2269        dst: Register,
2270        src: Source,
2271    ) -> Result<(), String> {
2272        self.alu(dst, src)
2273    }
2274    fn or<'a>(
2275        &mut self,
2276        _context: &mut Self::Context<'a>,
2277        dst: Register,
2278        src: Source,
2279    ) -> Result<(), String> {
2280        self.alu(dst, src)
2281    }
2282    fn or64<'a>(
2283        &mut self,
2284        _context: &mut Self::Context<'a>,
2285        dst: Register,
2286        src: Source,
2287    ) -> Result<(), String> {
2288        self.alu(dst, src)
2289    }
2290    fn rsh<'a>(
2291        &mut self,
2292        _context: &mut Self::Context<'a>,
2293        dst: Register,
2294        src: Source,
2295    ) -> Result<(), String> {
2296        self.alu(dst, src)
2297    }
2298    fn rsh64<'a>(
2299        &mut self,
2300        _context: &mut Self::Context<'a>,
2301        dst: Register,
2302        src: Source,
2303    ) -> Result<(), String> {
2304        self.alu(dst, src)
2305    }
2306    fn sub<'a>(
2307        &mut self,
2308        _context: &mut Self::Context<'a>,
2309        dst: Register,
2310        src: Source,
2311    ) -> Result<(), String> {
2312        self.alu(dst, src)
2313    }
2314    fn sub64<'a>(
2315        &mut self,
2316        _context: &mut Self::Context<'a>,
2317        dst: Register,
2318        src: Source,
2319    ) -> Result<(), String> {
2320        self.alu(dst, src)
2321    }
2322    fn xor<'a>(
2323        &mut self,
2324        _context: &mut Self::Context<'a>,
2325        dst: Register,
2326        src: Source,
2327    ) -> Result<(), String> {
2328        self.alu(dst, src)
2329    }
2330    fn xor64<'a>(
2331        &mut self,
2332        _context: &mut Self::Context<'a>,
2333        dst: Register,
2334        src: Source,
2335    ) -> Result<(), String> {
2336        self.alu(dst, src)
2337    }
2338
2339    fn mov<'a>(
2340        &mut self,
2341        _context: &mut Self::Context<'a>,
2342        dst: Register,
2343        src: Source,
2344    ) -> Result<(), String> {
2345        if src == Source::Reg(dst) || !self.registers.contains(&dst) {
2346            return Ok(());
2347        }
2348        if let Source::Reg(src) = src {
2349            self.registers.insert(src);
2350        }
2351        self.registers.remove(&dst);
2352        Ok(())
2353    }
    /// 64-bit move: operand width does not affect read dependencies, so this delegates
    /// to the 32-bit handler.
    fn mov64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.mov(context, dst, src)
    }
2362
    fn neg<'a>(&mut self, _context: &mut Self::Context<'a>, _dst: Register) -> Result<(), String> {
        // This is reading and writing the same register, so the set of
        // dependencies is unchanged.
        Ok(())
    }
    fn neg64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        _dst: Register,
    ) -> Result<(), String> {
        // This is reading and writing the same register, so the set of
        // dependencies is unchanged.
        Ok(())
    }

    fn be<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        _dst: Register,
        _width: DataWidth,
    ) -> Result<(), String> {
        // Byte-swaps read and write the same register, so the set of
        // dependencies is unchanged.
        Ok(())
    }
    fn le<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        _dst: Register,
        _width: DataWidth,
    ) -> Result<(), String> {
        // Byte-swaps read and write the same register, so the set of
        // dependencies is unchanged.
        Ok(())
    }
2394
2395    fn call_external<'a>(
2396        &mut self,
2397        context: &mut Self::Context<'a>,
2398        index: u32,
2399    ) -> Result<(), String> {
2400        let Some(signature) = context.calling_context.helpers.get(&index).cloned() else {
2401            return Err(format!("unknown external function {}", index));
2402        };
2403        // 0 is overwritten and 1 to 5 are scratch registers
2404        for register in 0..signature.args.len() + 1 {
2405            self.registers.remove(&(register as Register));
2406        }
2407        // 1 to k are parameters.
2408        for register in 0..signature.args.len() {
2409            self.registers.insert((register + 1) as Register);
2410        }
2411        Ok(())
2412    }
2413
    fn exit<'a>(&mut self, _context: &mut Self::Context<'a>) -> Result<(), String> {
        // `exit` reads r0 (the program's return value) unconditionally, so
        // r0 is always a dependency.
        self.registers.insert(0);
        Ok(())
    }

    fn jump<'a>(&mut self, _context: &mut Self::Context<'a>, _offset: i16) -> Result<(), String> {
        // An unconditional jump doesn't read or write any value.
        Ok(())
    }
2424
    // All conditional jumps have the same data-dependency behaviour: they
    // read `dst` and `src` (when `src` is a register) and write nothing,
    // which is what the shared `self.jmp` helper models. The jump offset is
    // irrelevant for dependencies.
    fn jeq<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jeq64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jne<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jne64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jge<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jge64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jgt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jgt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jle<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jle64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jlt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jlt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsge<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsge64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsgt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsgt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsle<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jsle64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jslt<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jslt64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jset<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
    fn jset64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.jmp(dst, src)
    }
2623
    // Atomic read-modify-write operations all delegate to the shared
    // `self.atomic` helper, passing the width of the access and whether the
    // operation always fetches the previous value (`cmpxchg` does, the others
    // only when `fetch` is set).
    fn atomic_add<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_add64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    fn atomic_and<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_and64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    fn atomic_or<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_or64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    fn atomic_xor<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_xor64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    fn atomic_xchg<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
    }

    fn atomic_xchg64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
    }

    // `cmpxchg` always fetches: it writes the previous memory value to r0.
    fn atomic_cmpxchg<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, true, dst, offset, src, DataWidth::U32, true)
    }

    fn atomic_cmpxchg64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic(&context.computation_context, true, dst, offset, src, DataWidth::U64, true)
    }
2753
    /// Models the data dependencies of `dst <- *(src + offset)`.
    fn load<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        let context = &context.computation_context;
        // The loaded value only matters if `dst` is itself a dependency.
        if self.registers.contains(&dst) {
            let addr = context.reg(src)?;
            if let Type::PtrToStack { offset: stack_offset } = addr {
                let stack_offset = stack_offset.add(offset);
                if !stack_offset.is_valid() {
                    return Err(format!("Invalid stack offset at {}", context.pc));
                }
                // The stack slot the value comes from becomes a dependency.
                self.stack.insert(stack_offset.array_index());
            }
        }
        // The address register is read even when the loaded value is unused,
        // since the pointer must still be valid for verification.
        // NOTE(review): unlike `load64`, `dst` is not removed from the live
        // set here; this is conservative and sidesteps the `dst == src`
        // ordering subtlety — confirm this is intentional.
        self.registers.insert(src);
        Ok(())
    }
2776
    /// Models the data dependencies of a 64-bit immediate load: `dst` is
    /// overwritten and no register is read, so `dst` stops being a
    /// dependency.
    fn load64<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        _src: u8,
        _lower: u32,
    ) -> Result<(), String> {
        self.registers.remove(&dst);
        Ok(())
    }
2787
    /// Models the data dependencies of a legacy packet load (LD_ABS/LD_IND).
    fn load_from_packet<'a>(
        &mut self,
        _context: &mut Self::Context<'a>,
        dst: Register,
        src: Register,
        _offset: i32,
        register_offset: Option<Register>,
        _width: DataWidth,
    ) -> Result<(), String> {
        // Registers 1 to 5 are clobbered (scratch), so they stop being
        // dependencies.
        for register in 1..6 {
            self.registers.remove(&(register as Register));
        }
        // Only do something if the dst is read, otherwise the computation doesn't matter.
        // NOTE(review): if `dst` were one of r1-r5, the scratch clearing
        // above already removed it and this branch would be skipped;
        // presumably `dst` is always r0 for these instructions — confirm.
        if self.registers.remove(&dst) {
            // The packet pointer and the optional offset register are read.
            self.registers.insert(src);
            if let Some(reg) = register_offset {
                self.registers.insert(reg);
            }
        }
        Ok(())
    }
2810
2811    fn store<'a>(
2812        &mut self,
2813        context: &mut Self::Context<'a>,
2814        dst: Register,
2815        offset: i16,
2816        src: Source,
2817        width: DataWidth,
2818    ) -> Result<(), String> {
2819        let context = &context.computation_context;
2820        let addr = context.reg(dst)?;
2821        if let Type::PtrToStack { offset: stack_offset } = addr {
2822            let stack_offset = stack_offset.add(offset);
2823            if !stack_offset.is_valid() {
2824                return Err(format!("Invalid stack offset at {}", context.pc));
2825            }
2826            if self.stack.remove(&stack_offset.array_index()) {
2827                if let Source::Reg(src) = src {
2828                    self.registers.insert(src);
2829                }
2830            }
2831        } else {
2832            if let Source::Reg(src) = src {
2833                self.registers.insert(src);
2834            }
2835        }
2836
2837        Ok(())
2838    }
2839}
2840
/// A verifier state at a point where the program terminates, together with
/// the data dependencies computed for that state.
#[derive(Debug)]
struct TerminatingContext {
    /// The full register/stack state at the terminating instruction.
    computation_context: ComputationContext,
    /// The registers and stack slots the terminating state depends on.
    dependencies: DataDependencies,
}
2846
/// Classifies ALU operations for pointer-arithmetic handling.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum AluType {
    /// An operation that is only valid on scalars.
    Plain,
    /// A subtraction.
    Sub,
    /// An addition.
    Add,
}

impl AluType {
    /// Can this operation be done on a pointer and a scalar? Only additions
    /// and subtractions can.
    fn is_ptr_compatible(&self) -> bool {
        matches!(self, Self::Sub | Self::Add)
    }
}
2863
/// The operand width of a conditional jump: `W32` jumps compare the 32-bit
/// views of the operands, `W64` jumps compare the full 64-bit values.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum JumpWidth {
    W32,
    W64,
}
2869
/// The kind of comparison performed by a conditional jump.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum JumpType {
    Eq,
    Ge,
    Gt,
    Le,
    /// A comparison that equal operands satisfy (besides the named ones).
    LooseComparaison,
    Lt,
    Ne,
    /// A comparison that equal operands do not satisfy (besides the named
    /// ones).
    StrictComparaison,
    Unknown,
}

impl JumpType {
    /// The comparison with the opposite truth value, used for the branch that
    /// is not taken.
    fn invert(&self) -> Self {
        match self {
            Self::Eq => Self::Ne,
            Self::Ne => Self::Eq,
            Self::Ge => Self::Lt,
            Self::Lt => Self::Ge,
            Self::Gt => Self::Le,
            Self::Le => Self::Gt,
            Self::LooseComparaison => Self::StrictComparaison,
            Self::StrictComparaison => Self::LooseComparaison,
            Self::Unknown => Self::Unknown,
        }
    }

    /// Whether equal operands fail this comparison.
    fn is_strict(&self) -> bool {
        matches!(self, Self::Gt | Self::Lt | Self::Ne | Self::StrictComparaison)
    }
}
2905
2906fn display_register(register: Register) -> String {
2907    format!("%r{register}")
2908}
2909
2910fn display_source(src: Source) -> String {
2911    match src {
2912        Source::Reg(r) => display_register(r),
2913        Source::Value(v) => format!("0x{v:x}"),
2914    }
2915}
2916
2917impl BpfVisitor for ComputationContext {
2918    type Context<'a> = VerificationContext<'a>;
2919
    // ALU instructions. Each delegates to `self.alu` with the operation name
    // used for logging, the pointer-arithmetic classification (`AluType`) and
    // a closure computing the scalar result; the 32-bit variants wrap the
    // closure in `alu32`.
    fn add<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("add32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x + y))
    }
    fn add64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        // `AluType::Add` allows pointer + scalar arithmetic.
        self.alu(Some("add"), context, dst, src, AluType::Add, |x, y| x + y)
    }
    fn and<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("and32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x & y))
    }
    fn and64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("and"), context, dst, src, AluType::Plain, |x, y| x & y)
    }
    fn arsh<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("arsh32"), context, dst, src, AluType::Plain, |x, y| {
            alu32(x, y, |x, y| x.ashr(y))
        })
    }
    fn arsh64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("arsh"), context, dst, src, AluType::Plain, |x, y| x.ashr(y))
    }
    fn div<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        // NOTE(review): division by zero is presumably handled inside
        // `ScalarValueData`'s `Div` impl (eBPF defines x/0 == 0) — confirm.
        self.alu(Some("div32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x / y))
    }
    fn div64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("div"), context, dst, src, AluType::Plain, |x, y| x / y)
    }
    fn lsh<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("lsh32"), context, dst, src, AluType::Plain, |x, y| {
            alu32(x, y, |x, y| x << y)
        })
    }
    fn lsh64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("lsh"), context, dst, src, AluType::Plain, |x, y| x << y)
    }
    fn r#mod<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("mod32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x % y))
    }
    fn mod64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("mod"), context, dst, src, AluType::Plain, |x, y| x % y)
    }
    /// `mov32`: moves the low 32 bits of the source into `dst`, zeroing the
    /// upper half.
    fn mov<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        bpf_log!(self, context, "mov32 {}, {}", display_register(dst), display_source(src));
        let src = self.compute_source(src)?;
        let value = match src {
            Type::ScalarValue(data) => {
                // Truncate the tracked value and both bit masks to 32 bits;
                // the upper 32 bits become known zeroes.
                let value = (data.value as u32) as u64;
                let unknown_mask = (data.unknown_mask as u32) as u64;
                let unwritten_mask = (data.unwritten_mask as u32) as u64;
                // NOTE(review): this apparently derives the new range by
                // "copying" the low 4 bytes of the old range into a zero
                // value — confirm against `compute_range_for_bytes_swap`.
                let urange = U64Range::compute_range_for_bytes_swap(0.into(), data.urange, 0, 0, 4);
                Type::ScalarValue(ScalarValueData::new(value, unknown_mask, unwritten_mask, urange))
            }
            // A 32-bit move of a non-scalar (e.g. a pointer) degrades it to
            // the default type.
            _ => Type::default(),
        };
        let mut next = self.next()?;
        next.set_reg(dst, value)?;
        context.states.push(next);
        Ok(())
    }
3043    fn mov64<'a>(
3044        &mut self,
3045        context: &mut Self::Context<'a>,
3046        dst: Register,
3047        src: Source,
3048    ) -> Result<(), String> {
3049        bpf_log!(self, context, "mov {}, {}", display_register(dst), display_source(src));
3050        let src = self.compute_source(src)?;
3051        let mut next = self.next()?;
3052        next.set_reg(dst, src)?;
3053        context.states.push(next);
3054        Ok(())
3055    }
    // More ALU instructions, following the same pattern as `add` and friends
    // above: delegate to `self.alu` with a logging name, an `AluType` and the
    // scalar operation.
    fn mul<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("mul32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x * y))
    }
    fn mul64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("mul"), context, dst, src, AluType::Plain, |x, y| x * y)
    }
    fn or<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("or32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x | y))
    }
    fn or64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("or"), context, dst, src, AluType::Plain, |x, y| x | y)
    }
    fn rsh<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("rsh32"), context, dst, src, AluType::Plain, |x, y| {
            alu32(x, y, |x, y| x >> y)
        })
    }
    fn rsh64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("rsh"), context, dst, src, AluType::Plain, |x, y| x >> y)
    }
    fn sub<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("sub32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x - y))
    }
    fn sub64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        // `AluType::Sub` allows pointer - scalar (and pointer difference)
        // arithmetic.
        self.alu(Some("sub"), context, dst, src, AluType::Sub, |x, y| x - y)
    }
    fn xor<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("xor32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x ^ y))
    }
    fn xor64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
    ) -> Result<(), String> {
        self.alu(Some("xor"), context, dst, src, AluType::Plain, |x, y| x ^ y)
    }
3138
    /// `neg32`: arithmetic negation of the low 32 bits. Modelled as an ALU
    /// operation with a dummy zero source, since only `dst` is read.
    fn neg<'a>(&mut self, context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
        bpf_log!(self, context, "neg32 {}", display_register(dst));
        self.alu(None, context, dst, Source::Value(0), AluType::Plain, |x, y| {
            alu32(x, y, |x, _y| -x)
        })
    }
    /// `neg`: 64-bit arithmetic negation, same dummy-source scheme as `neg`.
    fn neg64<'a>(&mut self, context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
        bpf_log!(self, context, "neg {}", display_register(dst));
        self.alu(None, context, dst, Source::Value(0), AluType::Plain, |x, _y| -x)
    }
3149
    /// `be`: converts `dst` to big-endian at the given width, delegating to
    /// the shared `endianness` helper parameterized on the byte order.
    fn be<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        self.endianness::<BigEndian>("be", context, dst, width)
    }

    /// `le`: converts `dst` to little-endian at the given width.
    fn le<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        self.endianness::<LittleEndian>("le", context, dst, width)
    }
3167
    /// Verifies a call to an external helper: checks the argument registers
    /// against the helper's signature and sets up the post-call state.
    fn call_external<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        index: u32,
    ) -> Result<(), String> {
        bpf_log!(self, context, "call 0x{:x}", index);
        let Some(signature) = context.calling_context.helpers.get(&index).cloned() else {
            return Err(format!("unknown external function {}", index));
        };
        // eBPF helpers take at most 5 arguments, passed in r1..r5.
        debug_assert!(signature.args.len() <= 5);
        let mut next = self.next()?;
        // Each argument register must match the declared parameter type.
        for (index, arg) in signature.args.iter().enumerate() {
            let reg = (index + 1) as u8;
            self.reg(reg)?.match_parameter_type(self, arg, index, &mut next)?;
        }
        // Parameters have been validated, specify the return value on return.
        if signature.invalidate_array_bounds {
            next.array_bounds.clear();
        }
        let value =
            self.resolve_return_value(context, &signature.return_value, &mut next, false)?;
        next.set_reg(0, value)?;
        // r1-r5 are scratch registers: their contents are unspecified after
        // the call, so reading them before rewriting them is invalid.
        for i in 1..=5 {
            next.set_reg(i, Type::default())?;
        }
        context.states.push(next);
        Ok(())
    }
3196
    /// Verifies an `exit`: r0 must hold a fully written scalar and all
    /// acquired resources must have been released.
    fn exit<'a>(&mut self, context: &mut Self::Context<'a>) -> Result<(), String> {
        bpf_log!(self, context, "exit");
        if !self.reg(0)?.is_written_scalar() {
            return Err("register 0 is incorrect at exit time".to_string());
        }
        if !self.resources.is_empty() {
            return Err("some resources have not been released at exit time".to_string());
        }
        // Record this state as a terminating state (used to prune equivalent
        // states encountered later).
        let this = self.clone();
        this.terminate(context)?;
        // Nothing to do, the program terminated with a valid scalar value.
        Ok(())
    }
3210
    /// Verifies an unconditional jump by queueing the state at the target,
    /// keeping the current state as its parent.
    fn jump<'a>(&mut self, context: &mut Self::Context<'a>, offset: i16) -> Result<(), String> {
        bpf_log!(self, context, "ja {}", offset);
        let parent = Some(Arc::new(self.clone()));
        context.states.push(self.jump_with_offset(offset, parent)?);
        Ok(())
    }
3217
    // Conditional jumps. Each delegates to `conditional_jump` with a logging
    // name, the jump type and width, and a closure that, given the value
    // ranges of the two operands, returns `Some(true)`/`Some(false)` when the
    // comparison can be decided statically and `None` otherwise.
    fn jeq<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jeq32",
            context,
            dst,
            src,
            offset,
            JumpType::Eq,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    // x == y
                    // Certain only when both ranges are the same single value.
                    if x.min == x.max && x.min == y.min && x.min == y.max {
                        return Some(true);
                    }
                    // Disjoint ranges can never be equal.
                    if x.max < y.min || y.max < x.min {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jeq64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jeq",
            context,
            dst,
            src,
            offset,
            JumpType::Eq,
            JumpWidth::W64,
            |x, y| {
                comp64(x, y, |x, y| {
                    // x == y
                    if x.min == x.max && x.min == y.min && x.min == y.max {
                        return Some(true);
                    }
                    if x.max < y.min || y.max < x.min {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jne<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jne32",
            context,
            dst,
            src,
            offset,
            JumpType::Ne,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    // x != y
                    // The exact inverse of the `jeq` decision above.
                    if x.min == x.max && x.min == y.min && x.min == y.max {
                        return Some(false);
                    }
                    if x.max < y.min || y.max < x.min {
                        return Some(true);
                    }
                    None
                })
            },
        )
    }
    fn jne64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jne",
            context,
            dst,
            src,
            offset,
            JumpType::Ne,
            JumpWidth::W64,
            |x, y| {
                comp64(x, y, |x, y| {
                    // x != y
                    if x.min == x.max && x.min == y.min && x.min == y.max {
                        return Some(false);
                    }
                    if x.max < y.min || y.max < x.min {
                        return Some(true);
                    }
                    None
                })
            },
        )
    }
    fn jge<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jge32",
            context,
            dst,
            src,
            offset,
            JumpType::Ge,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    // x >= y
                    // Always true when even the smallest x beats the largest y.
                    if x.min >= y.max {
                        return Some(true);
                    }
                    // Always false when even the largest x is below the smallest y.
                    if y.min > x.max {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jge64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jge",
            context,
            dst,
            src,
            offset,
            JumpType::Ge,
            JumpWidth::W64,
            |x, y| {
                comp64(x, y, |x, y| {
                    // x >= y
                    if x.min >= y.max {
                        return Some(true);
                    }
                    if y.min > x.max {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
    fn jgt<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jgt32",
            context,
            dst,
            src,
            offset,
            JumpType::Gt,
            JumpWidth::W32,
            |x, y| {
                comp32(x, y, |x, y| {
                    // x > y
                    if x.min > y.max {
                        return Some(true);
                    }
                    if y.min >= x.max {
                        return Some(false);
                    }
                    None
                })
            },
        )
    }
3421    fn jgt64<'a>(
3422        &mut self,
3423        context: &mut Self::Context<'a>,
3424        dst: Register,
3425        src: Source,
3426        offset: i16,
3427    ) -> Result<(), String> {
3428        self.conditional_jump(
3429            "jgt",
3430            context,
3431            dst,
3432            src,
3433            offset,
3434            JumpType::Gt,
3435            JumpWidth::W64,
3436            |x, y| {
3437                comp64(x, y, |x, y| {
3438                    // x > y
3439                    if x.min > y.max {
3440                        return Some(true);
3441                    }
3442                    if y.min >= x.max {
3443                        return Some(false);
3444                    }
3445                    None
3446                })
3447            },
3448        )
3449    }
3450    fn jle<'a>(
3451        &mut self,
3452        context: &mut Self::Context<'a>,
3453        dst: Register,
3454        src: Source,
3455        offset: i16,
3456    ) -> Result<(), String> {
3457        self.conditional_jump(
3458            "jle32",
3459            context,
3460            dst,
3461            src,
3462            offset,
3463            JumpType::Le,
3464            JumpWidth::W32,
3465            |x, y| {
3466                comp32(x, y, |x, y| {
3467                    // x <= y
3468                    if x.max <= y.min {
3469                        return Some(true);
3470                    }
3471                    if y.max < x.min {
3472                        return Some(false);
3473                    }
3474                    None
3475                })
3476            },
3477        )
3478    }
3479    fn jle64<'a>(
3480        &mut self,
3481        context: &mut Self::Context<'a>,
3482        dst: Register,
3483        src: Source,
3484        offset: i16,
3485    ) -> Result<(), String> {
3486        self.conditional_jump(
3487            "jle",
3488            context,
3489            dst,
3490            src,
3491            offset,
3492            JumpType::Le,
3493            JumpWidth::W64,
3494            |x, y| {
3495                comp64(x, y, |x, y| {
3496                    // x <= y
3497                    if x.max <= y.min {
3498                        return Some(true);
3499                    }
3500                    if y.max < x.min {
3501                        return Some(false);
3502                    }
3503                    None
3504                })
3505            },
3506        )
3507    }
3508    fn jlt<'a>(
3509        &mut self,
3510        context: &mut Self::Context<'a>,
3511        dst: Register,
3512        src: Source,
3513        offset: i16,
3514    ) -> Result<(), String> {
3515        self.conditional_jump(
3516            "jlt32",
3517            context,
3518            dst,
3519            src,
3520            offset,
3521            JumpType::Lt,
3522            JumpWidth::W32,
3523            |x, y| {
3524                comp32(x, y, |x, y| {
3525                    // x < y
3526                    if x.max < y.min {
3527                        return Some(true);
3528                    }
3529                    if y.max <= x.min {
3530                        return Some(false);
3531                    }
3532                    None
3533                })
3534            },
3535        )
3536    }
3537    fn jlt64<'a>(
3538        &mut self,
3539        context: &mut Self::Context<'a>,
3540        dst: Register,
3541        src: Source,
3542        offset: i16,
3543    ) -> Result<(), String> {
3544        self.conditional_jump(
3545            "jlt",
3546            context,
3547            dst,
3548            src,
3549            offset,
3550            JumpType::Lt,
3551            JumpWidth::W64,
3552            |x, y| {
3553                comp64(x, y, |x, y| {
3554                    // x < y
3555                    if x.max < y.min {
3556                        return Some(true);
3557                    }
3558                    if y.max <= x.min {
3559                        return Some(false);
3560                    }
3561                    None
3562                })
3563            },
3564        )
3565    }
    /// `jsge32`: branch when `dst >= src` as signed 32-bit values.
    /// Decidable only when both operands are exactly-known scalars (see
    /// `scomp32`).
    fn jsge<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jsge32",
            context,
            dst,
            src,
            offset,
            JumpType::LooseComparaison,
            JumpWidth::W32,
            |x, y| scomp32(x, y, |x, y| x >= y),
        )
    }
    /// `jsge` (64-bit): branch when `dst >= src` as signed values.
    /// Decidable only when both operands are exactly-known scalars (see
    /// `scomp64`).
    fn jsge64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jsge",
            context,
            dst,
            src,
            offset,
            JumpType::LooseComparaison,
            JumpWidth::W64,
            |x, y| scomp64(x, y, |x, y| x >= y),
        )
    }
    /// `jsgt32`: branch when `dst > src` as signed 32-bit values.
    /// Decidable only when both operands are exactly-known scalars.
    fn jsgt<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jsgt32",
            context,
            dst,
            src,
            offset,
            JumpType::StrictComparaison,
            JumpWidth::W32,
            |x, y| scomp32(x, y, |x, y| x > y),
        )
    }
    /// `jsgt` (64-bit): branch when `dst > src` as signed values.
    /// Decidable only when both operands are exactly-known scalars.
    fn jsgt64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jsgt",
            context,
            dst,
            src,
            offset,
            JumpType::StrictComparaison,
            JumpWidth::W64,
            |x, y| scomp64(x, y, |x, y| x > y),
        )
    }
    /// `jsle32`: branch when `dst <= src` as signed 32-bit values.
    /// Decidable only when both operands are exactly-known scalars.
    fn jsle<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jsle32",
            context,
            dst,
            src,
            offset,
            JumpType::LooseComparaison,
            JumpWidth::W32,
            |x, y| scomp32(x, y, |x, y| x <= y),
        )
    }
    /// `jsle` (64-bit): branch when `dst <= src` as signed values.
    /// Decidable only when both operands are exactly-known scalars.
    fn jsle64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jsle",
            context,
            dst,
            src,
            offset,
            JumpType::LooseComparaison,
            JumpWidth::W64,
            |x, y| scomp64(x, y, |x, y| x <= y),
        )
    }
    /// `jslt32`: branch when `dst < src` as signed 32-bit values.
    /// Decidable only when both operands are exactly-known scalars.
    fn jslt<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jslt32",
            context,
            dst,
            src,
            offset,
            JumpType::StrictComparaison,
            JumpWidth::W32,
            |x, y| scomp32(x, y, |x, y| x < y),
        )
    }
    /// `jslt` (64-bit): branch when `dst < src` as signed values.
    /// Decidable only when both operands are exactly-known scalars.
    fn jslt64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Source,
        offset: i16,
    ) -> Result<(), String> {
        self.conditional_jump(
            "jslt",
            context,
            dst,
            src,
            offset,
            JumpType::StrictComparaison,
            JumpWidth::W64,
            |x, y| scomp64(x, y, |x, y| x < y),
        )
    }
3710    fn jset<'a>(
3711        &mut self,
3712        context: &mut Self::Context<'a>,
3713        dst: Register,
3714        src: Source,
3715        offset: i16,
3716    ) -> Result<(), String> {
3717        self.conditional_jump(
3718            "jset32",
3719            context,
3720            dst,
3721            src,
3722            offset,
3723            JumpType::Unknown,
3724            JumpWidth::W32,
3725            |x, y| {
3726                comp32(x, y, |x, y| {
3727                    // x & y != 0
3728                    if x.min != x.max || y.min != y.max {
3729                        return None;
3730                    }
3731                    Some(x.min & y.min != 0)
3732                })
3733            },
3734        )
3735    }
3736    fn jset64<'a>(
3737        &mut self,
3738        context: &mut Self::Context<'a>,
3739        dst: Register,
3740        src: Source,
3741        offset: i16,
3742    ) -> Result<(), String> {
3743        self.conditional_jump(
3744            "jset",
3745            context,
3746            dst,
3747            src,
3748            offset,
3749            JumpType::Unknown,
3750            JumpWidth::W64,
3751            |x, y| {
3752                comp64(x, y, |x, y| {
3753                    // x & y != 0
3754                    if x.min != x.max || y.min != y.max {
3755                        return None;
3756                    }
3757                    Some(x.min & y.min != 0)
3758                })
3759            },
3760        )
3761    }
3762
    /// Atomic 32-bit `add` on `*(dst + offset)`; the value is modeled with
    /// 32-bit arithmetic via `alu32`. Uses `AluType::Add` (unlike the
    /// bitwise atomics, which use `AluType::Plain`).
    fn atomic_add<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "add32",
            context,
            DataWidth::U32,
            fetch,
            dst,
            offset,
            src,
            AluType::Add,
            |x, y| alu32(x, y, |x, y| x + y),
        )
    }
3783
    /// Atomic 64-bit `add` on `*(dst + offset)`, modeled with full-width
    /// scalar addition.
    fn atomic_add64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "add",
            context,
            DataWidth::U64,
            fetch,
            dst,
            offset,
            src,
            AluType::Add,
            |x, y| x + y,
        )
    }
3804
    /// Atomic 32-bit `and` on `*(dst + offset)`, modeled via `alu32`.
    fn atomic_and<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "and32",
            context,
            DataWidth::U32,
            fetch,
            dst,
            offset,
            src,
            AluType::Plain,
            |x, y| alu32(x, y, |x, y| x & y),
        )
    }
3825
    /// Atomic 64-bit `and` on `*(dst + offset)`.
    fn atomic_and64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "and",
            context,
            DataWidth::U64,
            fetch,
            dst,
            offset,
            src,
            AluType::Plain,
            |x, y| x & y,
        )
    }
3846
    /// Atomic 32-bit `or` on `*(dst + offset)`, modeled via `alu32`.
    fn atomic_or<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "or32",
            context,
            DataWidth::U32,
            fetch,
            dst,
            offset,
            src,
            AluType::Plain,
            |x, y| alu32(x, y, |x, y| x | y),
        )
    }
3867
    /// Atomic 64-bit `or` on `*(dst + offset)`.
    fn atomic_or64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "or",
            context,
            DataWidth::U64,
            fetch,
            dst,
            offset,
            src,
            AluType::Plain,
            |x, y| x | y,
        )
    }
3888
    /// Atomic 32-bit `xor` on `*(dst + offset)`, modeled via `alu32`.
    fn atomic_xor<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "xor32",
            context,
            DataWidth::U32,
            fetch,
            dst,
            offset,
            src,
            AluType::Plain,
            |x, y| alu32(x, y, |x, y| x ^ y),
        )
    }
3909
    /// Atomic 64-bit `xor` on `*(dst + offset)`.
    fn atomic_xor64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "xor",
            context,
            DataWidth::U64,
            fetch,
            dst,
            offset,
            src,
            AluType::Plain,
            |x, y| x ^ y,
        )
    }
3930
    /// Atomic 32-bit exchange: the stored value is simply the `src` operand
    /// (`|_, x| x` discards the previous memory value).
    /// NOTE(review): unlike `atomic_xchg64`, this goes through
    /// `atomic_operation` rather than `raw_atomic_operation`, and the stored
    /// value is not narrowed via `alu32` as the other 32-bit atomics are —
    /// presumably `DataWidth::U32` handles the truncation; confirm.
    fn atomic_xchg<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.atomic_operation(
            "xchg32",
            context,
            DataWidth::U32,
            fetch,
            dst,
            offset,
            src,
            AluType::Plain,
            |_, x| x,
        )
    }
3951
    /// Atomic 64-bit exchange: the stored type is the `src` operand itself
    /// (the raw variant passes full `Type`s, not just scalar data).
    fn atomic_xchg64<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        fetch: bool,
        dst: Register,
        offset: i16,
        src: Register,
    ) -> Result<(), String> {
        self.raw_atomic_operation(
            "xchg",
            context,
            DataWidth::U64,
            fetch,
            dst,
            offset,
            src,
            |_, _, x| Ok(x),
        )
    }
3971
3972    fn atomic_cmpxchg<'a>(
3973        &mut self,
3974        context: &mut Self::Context<'a>,
3975        dst: Register,
3976        offset: i16,
3977        src: Register,
3978    ) -> Result<(), String> {
3979        self.raw_atomic_cmpxchg("cmpxchg32", context, dst, offset, src, JumpWidth::W32, |x, y| {
3980            comp32(x, y, |x, y| {
3981                // x == y
3982                if x.min == x.max && x.min == y.min && x.min == y.max {
3983                    return Some(true);
3984                }
3985                if x.max < y.min || y.max < x.min {
3986                    return Some(false);
3987                }
3988                None
3989            })
3990        })
3991    }
3992
3993    fn atomic_cmpxchg64<'a>(
3994        &mut self,
3995        context: &mut Self::Context<'a>,
3996        dst: Register,
3997        offset: i16,
3998        src: Register,
3999    ) -> Result<(), String> {
4000        self.raw_atomic_cmpxchg("cmpxchg", context, dst, offset, src, JumpWidth::W64, |x, y| {
4001            comp64(x, y, |x, y| {
4002                // x == y
4003                if x.min == x.max && x.min == y.min && x.min == y.max {
4004                    return Some(true);
4005                }
4006                if x.max < y.min || y.max < x.min {
4007                    return Some(false);
4008                }
4009                None
4010            })
4011        })
4012    }
4013
    /// `ldx{w,h,b,dw} dst, [src + offset]`: loads `width` bytes from the
    /// memory pointed to by `src` into `dst`. The access is validated by
    /// `load_memory`, which also yields the abstract type of the loaded
    /// value; the successor state gets that type in `dst`.
    fn load<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Register,
        width: DataWidth,
    ) -> Result<(), String> {
        bpf_log!(
            self,
            context,
            "ldx{} {}, [{}{}]",
            width.str(),
            display_register(dst),
            display_register(src),
            print_offset(offset)
        );
        let addr = self.reg(src)?;
        let loaded_type = self.load_memory(context, &addr, Field::new(offset, width))?;
        let mut next = self.next()?;
        next.set_reg(dst, loaded_type)?;
        context.states.push(next);
        Ok(())
    }
4038
4039    fn load64<'a>(
4040        &mut self,
4041        context: &mut Self::Context<'a>,
4042        dst: Register,
4043        src: u8,
4044        lower: u32,
4045    ) -> Result<(), String> {
4046        let Some(next_instruction) = context.code.get(self.pc + 1) else {
4047            return Err(format!("incomplete lddw"));
4048        };
4049        if next_instruction.src_reg() != 0 || next_instruction.dst_reg() != 0 {
4050            return Err(format!("invalid lddw"));
4051        }
4052
4053        let value = match src {
4054            0 => {
4055                let value = (lower as u64) | (((next_instruction.imm() as u32) as u64) << 32);
4056                bpf_log!(self, context, "lddw {}, 0x{:x}", display_register(dst), value);
4057                Type::from(value)
4058            }
4059            BPF_PSEUDO_MAP_IDX => {
4060                let map_index = lower;
4061                bpf_log!(
4062                    self,
4063                    context,
4064                    "lddw {}, map_by_index({:x})",
4065                    display_register(dst),
4066                    map_index
4067                );
4068                context
4069                    .calling_context
4070                    .maps
4071                    .get(usize::try_from(map_index).unwrap())
4072                    .map(|schema| Type::ConstPtrToMap { id: map_index.into(), schema: *schema })
4073                    .ok_or_else(|| format!("lddw with invalid map index: {}", map_index))?
4074            }
4075            BPF_PSEUDO_MAP_IDX_VALUE => {
4076                let map_index = lower;
4077                let offset = next_instruction.imm();
4078                bpf_log!(
4079                    self,
4080                    context,
4081                    "lddw {}, map_value_by_index({:x})+{offset}",
4082                    display_register(dst),
4083                    map_index
4084                );
4085                let id = context.next_id();
4086                let map_schema = context
4087                    .calling_context
4088                    .maps
4089                    .get(usize::try_from(map_index).unwrap())
4090                    .ok_or_else(|| format!("lddw with invalid map index: {}", map_index))?;
4091
4092                if map_schema.map_type != bpf_map_type_BPF_MAP_TYPE_ARRAY {
4093                    return Err(format!(
4094                        "Invalid map type at index {map_index} for lddw. Expecting array."
4095                    ));
4096                }
4097                if map_schema.max_entries == 0 {
4098                    return Err(format!("Array has no entry."));
4099                }
4100
4101                Type::PtrToMemory {
4102                    id: MemoryId::from(id),
4103                    offset: offset.into(),
4104                    buffer_size: map_schema.value_size.into(),
4105                }
4106            }
4107            _ => {
4108                return Err(format!("invalid lddw"));
4109            }
4110        };
4111
4112        let parent = Some(Arc::new(self.clone()));
4113        let mut next = self.jump_with_offset(1, parent)?;
4114        next.set_reg(dst, value.into())?;
4115
4116        context.states.push(next);
4117        Ok(())
4118    }
4119
    /// Legacy packet load: loads `width` bytes from the program's packet,
    /// optionally indexed by `register_offset`. The analysis forks: one
    /// successor state models a successful load, while the failure path is
    /// treated as program termination and must satisfy the exit invariants.
    fn load_from_packet<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        src: Register,
        offset: i32,
        register_offset: Option<Register>,
        width: DataWidth,
    ) -> Result<(), String> {
        bpf_log!(
            self,
            context,
            "ldp{} {}{}",
            width.str(),
            register_offset.map(display_register).unwrap_or_else(Default::default),
            print_offset(offset)
        );

        // Verify that `src` refers to a packet.
        let src_type = self.reg(src)?;
        let src_is_packet = match &context.calling_context.packet_type {
            Some(packet_type) => src_type == *packet_type,
            None => false,
        };
        if !src_is_packet {
            return Err(format!("R{} is not a packet", src));
        }

        if let Some(reg) = register_offset {
            // The dynamic offset register must hold an initialized scalar.
            let reg = self.reg(reg)?;
            if !reg.is_written_scalar() {
                return Err("access to unwritten offset".to_string());
            }
        }
        // Handle the case where the load succeed.
        let mut next = self.next()?;
        next.set_reg(dst, Type::UNKNOWN_SCALAR)?;
        // NOTE(review): r1-r5 are reset to the unwritten state here, like
        // after a helper call — confirm this matches the runtime behavior.
        for i in 1..=5 {
            next.set_reg(i, Type::default())?;
        }
        context.states.push(next);
        // Handle the case where the load fails.
        if !self.reg(0)?.is_written_scalar() {
            return Err("register 0 is incorrect at exit time".to_string());
        }
        if !self.resources.is_empty() {
            return Err("some resources have not been released at exit time".to_string());
        }
        let this = self.clone();
        this.terminate(context)?;
        // Nothing to do, the program terminated with a valid scalar value.
        Ok(())
    }
4173
    /// `stx`/`st`: stores a register (`Source::Reg`) or an immediate
    /// (`Source::Value`, masked to `width`) into `*(dst + offset)` after
    /// `store_memory` validates the access.
    fn store<'a>(
        &mut self,
        context: &mut Self::Context<'a>,
        dst: Register,
        offset: i16,
        src: Source,
        width: DataWidth,
    ) -> Result<(), String> {
        let value = match src {
            Source::Reg(r) => {
                bpf_log!(
                    self,
                    context,
                    "stx{} [{}{}], {}",
                    width.str(),
                    display_register(dst),
                    print_offset(offset),
                    display_register(r),
                );
                self.reg(r)?
            }
            Source::Value(v) => {
                bpf_log!(
                    self,
                    context,
                    "st{} [{}{}], 0x{:x}",
                    width.str(),
                    display_register(dst),
                    print_offset(offset),
                    v,
                );
                // Immediates are truncated to the store width.
                Type::from(v & Type::mask(width))
            }
        };
        let mut next = self.next()?;
        let addr = self.reg(dst)?;
        next.store_memory(context, &addr, Field::new(offset, width), value)?;
        context.states.push(next);
        Ok(())
    }
4214}
4215
/// Applies a 32-bit ALU operation: both operands are narrowed to their
/// 32-bit view, combined with `op`, and the result converted back to the
/// 64-bit scalar abstraction.
fn alu32(
    x: ScalarValueData,
    y: ScalarValueData,
    op: impl FnOnce(U32ScalarValueData, U32ScalarValueData) -> U32ScalarValueData,
) -> ScalarValueData {
    op(U32ScalarValueData::from(x), U32ScalarValueData::from(y)).into()
}
4223
4224fn comp64(
4225    x: ScalarValueData,
4226    y: ScalarValueData,
4227    op: impl FnOnce(U64Range, U64Range) -> Option<bool>,
4228) -> Result<Option<bool>, ()> {
4229    if !x.is_fully_initialized() || !y.is_fully_initialized() {
4230        return Err(());
4231    }
4232    Ok(op(x.urange, y.urange))
4233}
4234
4235fn comp32(
4236    x: ScalarValueData,
4237    y: ScalarValueData,
4238    op: impl FnOnce(U32Range, U32Range) -> Option<bool>,
4239) -> Result<Option<bool>, ()> {
4240    let x = U32ScalarValueData::from(x);
4241    let y = U32ScalarValueData::from(y);
4242    if !x.is_fully_initialized() || !y.is_fully_initialized() {
4243        return Err(());
4244    }
4245    Ok(op(x.urange, y.urange))
4246}
4247
4248fn scomp64(
4249    x: ScalarValueData,
4250    y: ScalarValueData,
4251    op: impl FnOnce(i64, i64) -> bool,
4252) -> Result<Option<bool>, ()> {
4253    if !x.is_fully_initialized() || !y.is_fully_initialized() {
4254        return Err(());
4255    }
4256    if !x.is_known() || !y.is_known() {
4257        return Ok(None);
4258    }
4259    Ok(Some(op(x.value as i64, y.value as i64)))
4260}
4261
4262fn scomp32(
4263    x: ScalarValueData,
4264    y: ScalarValueData,
4265    op: impl FnOnce(i32, i32) -> bool,
4266) -> Result<Option<bool>, ()> {
4267    let x = U32ScalarValueData::from(x);
4268    let y = U32ScalarValueData::from(y);
4269    if !x.is_fully_initialized() || !y.is_fully_initialized() {
4270        return Err(());
4271    }
4272    if !x.is_known() || !y.is_known() {
4273        return Ok(None);
4274    }
4275    Ok(Some(op(x.value as i32, y.value as i32)))
4276}
4277
/// Renders an offset for disassembly-style logging: "" for zero, "+n" for
/// positive, "-n" for negative.
fn print_offset<T: Into<i32>>(offset: T) -> String {
    let offset: i32 = offset.into();
    match offset.cmp(&0) {
        Ordering::Equal => String::new(),
        Ordering::Greater => format!("+{offset}"),
        Ordering::Less => format!("{offset}"),
    }
}
4288
/// Applies `f` to the scalar value backing a stack offset and rewraps the
/// result as a `StackOffset`.
fn run_on_stack_offset<F>(v: StackOffset, f: F) -> StackOffset
where
    F: FnOnce(ScalarValueData) -> ScalarValueData,
{
    StackOffset(f(v.reg()))
}
4295
4296fn error_and_log<T>(
4297    logger: &mut dyn VerifierLogger,
4298    msg: impl std::string::ToString,
4299) -> Result<T, EbpfError> {
4300    let msg = msg.to_string();
4301    logger.log(msg.as_bytes());
4302    return Err(EbpfError::ProgramVerifyError(msg));
4303}
4304
/// Combines two partial-order results: they agree when equal, or when one
/// of them is `Equal` (in which case the other wins); otherwise the pair is
/// incomparable.
fn associate_orderings(o1: Ordering, o2: Ordering) -> Option<Ordering> {
    if o1 == o2 {
        Some(o1)
    } else if o2 == Ordering::Equal {
        Some(o1)
    } else if o1 == Ordering::Equal {
        Some(o2)
    } else {
        None
    }
}
4312
4313#[cfg(test)]
4314mod tests {
4315    use super::*;
4316    use std::collections::BTreeSet;
4317    use test_util::{assert_geq, assert_leq};
4318
    #[test]
    fn test_type_ordering() {
        // The partial order on `Type` places "more precise" below "more
        // general": concrete constants compare `Less` than the unknown
        // written scalar, and everything compares `Less` than the unwritten
        // default. Distinct constants and alias parameters are mutually
        // incomparable.
        let t0 = Type::from(0);
        let t1 = Type::from(1);
        let random = Type::AliasParameter { parameter_index: 8 };
        let unknown_written = Type::UNKNOWN_SCALAR;
        let unwritten = Type::default();

        assert_eq!(t0.partial_cmp(&t0), Some(Ordering::Equal));
        assert_eq!(t0.partial_cmp(&t1), None);
        assert_eq!(t0.partial_cmp(&random), None);
        assert_eq!(t0.partial_cmp(&unknown_written), Some(Ordering::Less));
        assert_eq!(t0.partial_cmp(&unwritten), Some(Ordering::Less));

        assert_eq!(t1.partial_cmp(&t0), None);
        assert_eq!(t1.partial_cmp(&t1), Some(Ordering::Equal));
        assert_eq!(t1.partial_cmp(&random), None);
        assert_eq!(t1.partial_cmp(&unknown_written), Some(Ordering::Less));
        assert_eq!(t1.partial_cmp(&unwritten), Some(Ordering::Less));

        assert_eq!(random.partial_cmp(&t0), None);
        assert_eq!(random.partial_cmp(&t1), None);
        assert_eq!(random.partial_cmp(&random), Some(Ordering::Equal));
        assert_eq!(random.partial_cmp(&unknown_written), None);
        assert_eq!(random.partial_cmp(&unwritten), Some(Ordering::Less));

        assert_eq!(unknown_written.partial_cmp(&t0), Some(Ordering::Greater));
        assert_eq!(unknown_written.partial_cmp(&t1), Some(Ordering::Greater));
        assert_eq!(unknown_written.partial_cmp(&random), None);
        assert_eq!(unknown_written.partial_cmp(&unknown_written), Some(Ordering::Equal));
        assert_eq!(unknown_written.partial_cmp(&unwritten), Some(Ordering::Less));

        assert_eq!(unwritten.partial_cmp(&t0), Some(Ordering::Greater));
        assert_eq!(unwritten.partial_cmp(&t1), Some(Ordering::Greater));
        assert_eq!(unwritten.partial_cmp(&random), Some(Ordering::Greater));
        assert_eq!(unwritten.partial_cmp(&unknown_written), Some(Ordering::Greater));
        assert_eq!(unwritten.partial_cmp(&unwritten), Some(Ordering::Equal));
    }
4357
    #[test]
    fn test_stack_ordering() {
        // A stack with a written slot is more constrained than an empty one
        // (compares `Less`); two stacks with incomparable slot contents are
        // unordered.
        let mut s1 = Stack::default();
        let mut s2 = Stack::default();

        assert_eq!(s1.partial_cmp(&s2), Some(Ordering::Equal));
        s1.set(0, 0.into());
        assert_eq!(s1.partial_cmp(&s2), Some(Ordering::Less));
        assert_eq!(s2.partial_cmp(&s1), Some(Ordering::Greater));
        s2.set(1, 1.into());
        assert_eq!(s1.partial_cmp(&s2), None);
        assert_eq!(s2.partial_cmp(&s1), None);
    }
4371
    #[test]
    fn test_context_ordering() {
        // Contexts compare pointwise over `array_bounds` (a context proving
        // larger bounds is more precise, hence `Less`) and require matching
        // `pc`; mixed directions or different pcs are incomparable.
        let mut c1 = ComputationContext::default();
        let mut c2 = ComputationContext::default();

        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Equal));

        c1.array_bounds.insert(1.into(), 5);
        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Less));
        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Greater));

        c2.array_bounds.insert(1.into(), 7);
        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));

        c1.array_bounds.insert(2.into(), 9);
        assert_eq!(c1.partial_cmp(&c2), None);
        assert_eq!(c2.partial_cmp(&c1), None);

        c2.array_bounds.insert(2.into(), 9);
        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));

        c2.array_bounds.insert(3.into(), 12);
        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));

        c1.pc = 8;
        assert_eq!(c1.partial_cmp(&c2), None);
        assert_eq!(c2.partial_cmp(&c1), None);
    }
4403
4404    #[test]
4405    fn test_stack_access() {
4406        let mut s = Stack::default();
4407
4408        // Store data in the range [8, 26) and verify that `read_data_ptr()` fails for any
4409        // reads outside of that range.
4410        assert!(s.store(StackOffset(8.into()), Type::UNKNOWN_SCALAR, DataWidth::U64).is_ok());
4411        assert!(s.store(StackOffset(16.into()), Type::UNKNOWN_SCALAR, DataWidth::U64).is_ok());
4412        assert!(s.store(StackOffset(24.into()), Type::UNKNOWN_SCALAR, DataWidth::U16).is_ok());
4413
4414        for offset in 0..32 {
4415            for end in (offset + 1)..32 {
4416                assert_eq!(
4417                    s.read_data_ptr(2, StackOffset(offset.into()), (end - offset) as u64).is_ok(),
4418                    offset >= 8 && end <= 26
4419                );
4420            }
4421        }
4422
4423        // Verify that overflows are handled properly.
4424        assert!(s.read_data_ptr(2, StackOffset(12.into()), u64::MAX - 2).is_err());
4425    }
4426
4427    #[test]
4428    fn test_compute_range_for_bytes_swap() {
4429        // Build a list of interesting values. Interesting values are all possible combination of
4430        // bytes being either 0, max value, or an intermediary value.
4431        let mut values = BTreeSet::<u64>::default();
4432        for v1 in &[0x00, 0x1, u64::MAX] {
4433            for v2 in &[0x00, 0x1, u64::MAX] {
4434                for v3 in &[0x00, 0x1, u64::MAX] {
4435                    values.insert(U64Range::assemble_slices((*v1, *v2, *v3), 1, 1));
4436                }
4437            }
4438        }
4439        // Replace the second byte of old by the first byte of new and return the result.
4440        let store = |old: u64, new: u64| (old & !0xff00) | ((new & 0xff) << 8);
4441
4442        for old in &values {
4443            for new in &values {
4444                let s = store(*old, *new);
4445                for min_old in values.iter().filter(|v| *v <= old) {
4446                    for min_new in values.iter().filter(|v| *v <= new) {
4447                        for max_old in values.iter().filter(|v| *v >= old) {
4448                            for max_new in values.iter().filter(|v| *v >= new) {
4449                                let range = U64Range::compute_range_for_bytes_swap(
4450                                    U64Range::new(*min_old, *max_old),
4451                                    U64Range::new(*min_new, *max_new),
4452                                    1,
4453                                    0,
4454                                    1,
4455                                );
4456                                assert_leq!(range.min, s);
4457                                assert_geq!(range.max, s);
4458                            }
4459                        }
4460                    }
4461                }
4462            }
4463        }
4464    }
4465}