ebpf/verifier.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#![allow(unused_variables)]

use crate::scalar_value::{ScalarValueData, U32Range, U32ScalarValueData, U64Range};
use crate::visitor::{BpfVisitor, ProgramCounter, Register, Source};
use crate::{
    BPF_MAX_INSTS, BPF_PSEUDO_MAP_IDX, BPF_PSEUDO_MAP_IDX_VALUE, BPF_STACK_SIZE, DataWidth,
    EbpfError, EbpfInstruction, GENERAL_REGISTER_COUNT, MapSchema, REGISTER_COUNT,
};
use byteorder::{BigEndian, ByteOrder, LittleEndian, NativeEndian};
use fuchsia_sync::Mutex;
use linux_uapi::bpf_map_type_BPF_MAP_TYPE_ARRAY;
use std::cmp::Ordering;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::sync::Arc;
use zerocopy::IntoBytes;

const U32_MAX: u64 = u32::MAX as u64;

/// A trait to receive the log from the verifier.
pub trait VerifierLogger {
    /// Log a line. The line is always a correctly encoded ASCII string.
    fn log(&mut self, line: &[u8]);
}

/// A `VerifierLogger` that drops all its content.
pub struct NullVerifierLogger;

impl VerifierLogger for NullVerifierLogger {
    fn log(&mut self, line: &[u8]) {
        debug_assert!(line.is_ascii());
    }
}
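
// As an illustrative sketch (not part of this module's API), a logger that
// keeps the verifier output for later inspection could implement the trait as
// follows:
//
//     struct CollectingLogger(Vec<String>);
//
//     impl VerifierLogger for CollectingLogger {
//         fn log(&mut self, line: &[u8]) {
//             debug_assert!(line.is_ascii());
//             self.0.push(String::from_utf8_lossy(line).into_owned());
//         }
//     }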

/// An identifier for a memory buffer accessible by an ebpf program. The identifiers are built as
/// a chain of unique identifiers so that a buffer can contain multiple pointers to the same type
/// and the verifier can distinguish between the different instances.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct MemoryId {
    id: u64,
    parent: Option<Box<MemoryId>>,
}

impl MemoryId {
    pub fn id(&self) -> u64 {
        self.id
    }
}

impl From<u64> for MemoryId {
    fn from(id: u64) -> Self {
        Self { id, parent: None }
    }
}

/// A counter used to generate new ids for parameters. The namespace is the same as for ids
/// generated for types while verifying an ebpf program, but it starts at u64::MAX / 2 and so is
/// guaranteed to never collide, because the number of instructions in an ebpf program is bounded.
static BPF_TYPE_IDENTIFIER_COUNTER: std::sync::atomic::AtomicU64 =
    std::sync::atomic::AtomicU64::new(u64::MAX / 2);

impl MemoryId {
    pub fn new() -> MemoryId {
        Self {
            id: BPF_TYPE_IDENTIFIER_COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
            parent: None,
        }
    }

    /// Builds a new id such that `other` is prepended to the chain of parents of `self`.
    fn prepended(&self, other: MemoryId) -> Self {
        match &self.parent {
            None => MemoryId { id: self.id, parent: Some(Box::new(other)) },
            Some(parent) => {
                MemoryId { id: self.id, parent: Some(Box::new(parent.prepended(other))) }
            }
        }
    }

    /// Returns whether the parent of `self` is `parent`.
    fn has_parent(&self, parent: &MemoryId) -> bool {
        match &self.parent {
            None => false,
            Some(p) => p.as_ref() == parent,
        }
    }

    /// Returns whether `self` and `other` represent the same type. This is true if both have the
    /// same id and, if both have parents, the parents also match.
    fn matches(&self, other: &MemoryId) -> bool {
        if self.id != other.id {
            return false;
        };
        match (&self.parent, &other.parent) {
            (Some(p1), Some(p2)) => p1.matches(p2.as_ref()),
            _ => true,
        }
    }
}
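
// Illustration of id chaining (a sketch, not a test in this module):
//
//     let a = MemoryId::new();
//     let b = MemoryId::new();
//     // `prepended` appends `a` at the end of `b`'s parent chain, so `a`
//     // becomes the (only) parent of the new id.
//     assert!(b.prepended(a.clone()).has_parent(&a));
//     assert!(!b.has_parent(&a));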

/// The type of a field in a struct pointed to by `Type::PtrToStruct`.
#[derive(Clone, Debug, PartialEq)]
pub enum FieldType {
    /// Read-only scalar value.
    Scalar { size: usize },

    /// Mutable scalar value.
    MutableScalar { size: usize },

    /// A pointer to the kernel memory. The full buffer is `buffer_size` bytes long.
    PtrToMemory { is_32_bit: bool, id: MemoryId, buffer_size: usize },

    /// A nullable pointer to the kernel memory. The full buffer is `buffer_size` bytes long.
    NullablePtrToMemory { is_32_bit: bool, id: MemoryId, buffer_size: usize },

    /// A pointer to the kernel memory. The full buffer size is determined by an instance of
    /// `PtrToEndArray` with the same `id`.
    PtrToArray { is_32_bit: bool, id: MemoryId },

    /// A pointer to the kernel memory that represents the first non-accessible byte of a
    /// `PtrToArray` with the same `id`.
    PtrToEndArray { is_32_bit: bool, id: MemoryId },
}

/// Definition of a field in a `Type::PtrToStruct`.
#[derive(Clone, Debug, PartialEq)]
pub struct FieldDescriptor {
    /// The offset at which the field is located.
    pub offset: usize,

    /// The type of the pointed memory. Currently the verifier supports only `PtrToArray`,
    /// `PtrToStruct`, `PtrToEndArray` and `ScalarValue`.
    pub field_type: FieldType,
}

impl FieldDescriptor {
    fn size(&self) -> usize {
        match self.field_type {
            FieldType::Scalar { size } | FieldType::MutableScalar { size } => size,
            FieldType::PtrToMemory { is_32_bit, .. }
            | FieldType::NullablePtrToMemory { is_32_bit, .. }
            | FieldType::PtrToArray { is_32_bit, .. }
            | FieldType::PtrToEndArray { is_32_bit, .. } => {
                if is_32_bit {
                    4
                } else {
                    8
                }
            }
        }
    }
}

/// The offset and width of a field in a struct.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct Field {
    offset: i16,
    width: DataWidth,
}

impl Field {
    fn new(offset: i16, width: DataWidth) -> Self {
        Self { offset, width }
    }
}

/// Defines the field layout in a struct pointed to by `Type::PtrToStruct`.
#[derive(Debug, PartialEq, Default)]
pub struct StructDescriptor {
    /// The list of fields.
    pub fields: Vec<FieldDescriptor>,
}

impl StructDescriptor {
    /// Returns true if `self` is a subtype of `super_struct`.
    fn is_subtype(&self, super_struct: &StructDescriptor) -> bool {
        // Check that `super_struct.fields` is a subset of `self.fields`.
        for super_field in super_struct.fields.iter() {
            if self.fields.iter().find(|field| *field == super_field).is_none() {
                return false;
            }
        }
        true
    }

    /// Finds the field type for a load/store at the specified location. `None` is returned if the
    /// access is invalid and the program must be rejected.
    fn find_field(&self, base_offset: ScalarValueData, field: Field) -> Option<&FieldDescriptor> {
        let offset = base_offset + field.offset;
        let field_desc = self.fields.iter().find(|f| {
            f.offset <= offset.min() as usize && (offset.max() as usize) < f.offset + f.size()
        })?;
        let is_valid_load = match field_desc.field_type {
            FieldType::Scalar { size } | FieldType::MutableScalar { size } => {
                // For scalars check that the access is within the bounds of the field.
                ((offset + field.width.bytes()).max() as usize) <= field_desc.offset + size
            }
            FieldType::PtrToMemory { is_32_bit, .. }
            | FieldType::NullablePtrToMemory { is_32_bit, .. }
            | FieldType::PtrToArray { is_32_bit, .. }
            | FieldType::PtrToEndArray { is_32_bit, .. } => {
                let expected_width = if is_32_bit { DataWidth::U32 } else { DataWidth::U64 };
                // Pointer loads are expected to load the whole field.
                offset.is_known()
                    && offset.value as usize == field_desc.offset
                    && field.width == expected_width
            }
        };

        is_valid_load.then_some(field_desc)
    }
}
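
// A sketch of how a descriptor could be declared for a struct holding an
// 8-byte mutable scalar followed by a 64-bit array pointer and the matching
// end-of-array pointer (the memory id is illustrative):
//
//     let array_id = MemoryId::new();
//     let descriptor = StructDescriptor {
//         fields: vec![
//             FieldDescriptor { offset: 0, field_type: FieldType::MutableScalar { size: 8 } },
//             FieldDescriptor {
//                 offset: 8,
//                 field_type: FieldType::PtrToArray { is_32_bit: false, id: array_id.clone() },
//             },
//             FieldDescriptor {
//                 offset: 16,
//                 field_type: FieldType::PtrToEndArray { is_32_bit: false, id: array_id },
//             },
//         ],
//     };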

#[derive(Clone, Debug, PartialEq)]
pub enum MemoryParameterSize {
    /// The memory buffer has the given size.
    Value(u64),
    /// The memory buffer size is given by the parameter at the given index.
    Reference { index: u8 },
}

impl MemoryParameterSize {
    fn size(&self, context: &ComputationContext) -> Result<u64, String> {
        match self {
            Self::Value(size) => Ok(*size),
            Self::Reference { index } => {
                let size_type = context.reg(index + 1)?;
                match size_type {
                    Type::ScalarValue(data) if data.is_known() => Ok(data.value),
                    _ => Err("cannot know buffer size".to_string()),
                }
            }
        }
    }
}
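
// For example, a helper whose memory argument is passed in R2 and whose size
// is the known scalar in R3 (argument index 2) would describe the size as a
// reference (an illustrative sketch):
//
//     let size = MemoryParameterSize::Reference { index: 2 };
//
// `size()` then resolves the value through `context.reg(2 + 1)`, i.e. R3.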

#[derive(Clone, Debug, PartialEq)]
pub enum Type {
    /// A number.
    ScalarValue(ScalarValueData),
    /// A pointer to a map object.
    ConstPtrToMap { id: u64, schema: MapSchema },
    /// A pointer into the stack.
    PtrToStack { offset: StackOffset },
    /// A pointer to the kernel memory. The full buffer is `buffer_size` bytes long. The pointer is
    /// situated at `offset` from the start of the buffer.
    PtrToMemory { id: MemoryId, offset: ScalarValueData, buffer_size: u64 },
    /// A pointer to a struct with the specified `StructDescriptor`.
    PtrToStruct { id: MemoryId, offset: ScalarValueData, descriptor: Arc<StructDescriptor> },
    /// A pointer to the kernel memory. The full buffer size is determined by an instance of
    /// `PtrToEndArray` with the same `id`. The pointer is situated at `offset` from the start of
    /// the buffer.
    PtrToArray { id: MemoryId, offset: ScalarValueData },
    /// A pointer to the kernel memory that represents the first non-accessible byte of a
    /// `PtrToArray` with the same `id`.
    PtrToEndArray { id: MemoryId },
    /// A pointer that might be null and must be validated before being dereferenced.
    NullOr { id: MemoryId, inner: Box<Type> },
    /// An object that must be passed to a method with an associated `ReleaseParameter` before the
    /// end of the program.
    Releasable { id: MemoryId, inner: Box<Type> },
    /// A function parameter that must be a `ScalarValue` when called.
    ScalarValueParameter,
    /// A function parameter that must be a `ConstPtrToMap` when called.
    ConstPtrToMapParameter,
    /// A function parameter that must be a key of a map.
    MapKeyParameter {
        /// The index in the arguments list that contains a `ConstPtrToMap` for the map this key is
        /// associated with.
        map_ptr_index: u8,
    },
    /// A function parameter that must be a value of a map.
    MapValueParameter {
        /// The index in the arguments list that contains a `ConstPtrToMap` for the map this value
        /// is associated with.
        map_ptr_index: u8,
    },
    /// A function parameter that must be a pointer to memory.
    MemoryParameter {
        /// The size of the memory buffer, given either as a known value or as a reference to the
        /// argument that contains a scalar value with the size.
        size: MemoryParameterSize,
        /// Whether this memory is read by the function.
        input: bool,
        /// Whether this memory is written by the function.
        output: bool,
    },
    /// A function return value that is the same type as a parameter.
    AliasParameter {
        /// The index in the argument list of the parameter that has the type of this return value.
        parameter_index: u8,
    },
    /// A function return value that is either null, or the given type.
    NullOrParameter(Box<Type>),
    /// A function parameter that must be a pointer to memory with the given id.
    StructParameter { id: MemoryId },
    /// A function return value that must be passed to a method with an associated
    /// `ReleaseParameter` before the end of the program.
    ReleasableParameter { id: MemoryId, inner: Box<Type> },
    /// A function parameter that will release the value.
    ReleaseParameter { id: MemoryId },
    /// A function parameter that is not checked.
    AnyParameter,
}

/// Defines a partial ordering on `Type` instances, capturing the notion of how "broad"
/// a type is in terms of the set of potential values it represents.
///
/// The ordering is defined such that `t1 > t2` if a proof that an eBPF program terminates
/// in a state where a register or memory location has type `t1` is also a proof that
/// the program terminates in a state where that location has type `t2`.
///
/// In other words, a "broader" type represents a larger set of possible values, and
/// proving termination with a broader type implies termination with any narrower type.
///
/// Examples:
/// * `Type::ScalarValue(ScalarValueData({ unknown_mask: 0, .. }))` (a known scalar value) is less
///   than `Type::ScalarValue(ScalarValueData({ unknown_mask: u64::MAX, .. }))` (an unknown scalar
///   value).
impl PartialOrd for Type {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // If the values are equal, return the known result.
        if self == other {
            return Some(Ordering::Equal);
        }

        // If one value is not initialized, the types are ordered.
        if self == &Type::UNINITIALIZED {
            return Some(Ordering::Greater);
        }
        if other == &Type::UNINITIALIZED {
            return Some(Ordering::Less);
        }

        // Otherwise, only scalars are comparable.
        match (self, other) {
            (Self::ScalarValue(data1), Self::ScalarValue(data2)) => data1.partial_cmp(data2),
            _ => None,
        }
    }
}

impl From<ScalarValueData> for Type {
    fn from(value: ScalarValueData) -> Self {
        Self::ScalarValue(value)
    }
}

impl From<u64> for Type {
    fn from(value: u64) -> Self {
        Self::ScalarValue(value.into())
    }
}

impl Default for Type {
    /// A new instance of `Type` where no bit has been written yet.
    fn default() -> Self {
        Self::UNINITIALIZED.clone()
    }
}

impl Type {
    /// An uninitialized value.
    pub const UNINITIALIZED: Self = Self::ScalarValue(ScalarValueData::UNINITIALIZED);

    /// A `Type` where the data is usable by userspace, but the value is not known by the verifier.
    pub const UNKNOWN_SCALAR: Self = Self::ScalarValue(ScalarValueData::UNKNOWN_WRITTEN);

    /// The mask associated with a data of size `width`.
    fn mask(width: DataWidth) -> u64 {
        if width == DataWidth::U64 { u64::MAX } else { (1 << width.bits()) - 1 }
    }

    /// Returns true if `self` is a scalar with all bytes initialized.
    fn is_written_scalar(&self) -> bool {
        match self {
            Self::ScalarValue(data) if data.is_fully_initialized() => true,
            _ => false,
        }
    }

    /// Returns true if `self` is a subtype of `super_type`.
    pub fn is_subtype(&self, super_type: &Type) -> bool {
        match (self, super_type) {
            // Anything can be used in place of an uninitialized value.
            (_, Self::ScalarValue(data)) if data.is_uninitialized() => true,

            (
                Self::PtrToStruct { id: id1, offset: offset1, descriptor: descriptor1 },
                Self::PtrToStruct { id: id2, offset: offset2, descriptor: descriptor2 },
            ) => id1 == id2 && offset1 <= offset2 && descriptor1.is_subtype(descriptor2),

            // Every type is a subtype of itself.
            (self_type, super_type) if self_type == super_type => true,

            _ => false,
        }
    }

    /// Returns true if `self` is guaranteed to be non-zero.
    pub fn is_non_zero(&self) -> bool {
        match self {
            Self::ScalarValue(d) => d.min() > 0,
            Self::NullOr { .. } => false,
            _ => true,
        }
    }

    fn inner(&self, context: &ComputationContext) -> Result<&Type, String> {
        match self {
            Self::Releasable { id, inner } => {
                if context.resources.contains(id) {
                    Ok(&inner)
                } else {
                    Err("Access to released resource".to_string())
                }
            }
            _ => Ok(self),
        }
    }

    /// Constrains `type1` and `type2` for a conditional jump with the specified `jump_type` and
    /// `jump_width`.
    fn constraint(
        context: &mut ComputationContext,
        jump_type: JumpType,
        jump_width: JumpWidth,
        type1: Self,
        type2: Self,
    ) -> Result<(Self, Self), String> {
        let result = match (jump_width, jump_type, type1.inner(context)?, type2.inner(context)?) {
            (JumpWidth::W64, JumpType::Eq, Type::ScalarValue(data1), Type::ScalarValue(data2))
                if data1.is_fully_initialized() && data2.is_fully_initialized() =>
            {
                let umin = std::cmp::max(data1.min(), data2.min());
                let umax = std::cmp::min(data1.max(), data2.max());
                let v = Type::ScalarValue(ScalarValueData::new(
                    data1.value | data2.value,
                    data1.unknown_mask & data2.unknown_mask,
                    0,
                    U64Range::new(umin, umax),
                ));
                (v.clone(), v)
            }
            (JumpWidth::W32, JumpType::Eq, Type::ScalarValue(data1), Type::ScalarValue(data2))
                if data1.is_fully_initialized() && data2.is_fully_initialized() =>
            {
                let maybe_umin = if data1.min() <= U32_MAX && data2.min() <= U32_MAX {
                    Some(std::cmp::max(data1.min(), data2.min()))
                } else {
                    None
                };
                let maybe_umax = if data1.max() <= U32_MAX && data2.max() <= U32_MAX {
                    Some(std::cmp::min(data1.max(), data2.max()))
                } else {
                    None
                };

                let urange1 = U64Range::new(
                    maybe_umin.unwrap_or_else(|| data1.min()),
                    maybe_umax.unwrap_or_else(|| data1.max()),
                );
                let v1 = Type::ScalarValue(ScalarValueData::new(
                    data1.value | (data2.value & U32_MAX),
                    data1.unknown_mask & (data2.unknown_mask | (U32_MAX << 32)),
                    0,
                    urange1,
                ));

                let urange2 = U64Range::new(
                    maybe_umin.unwrap_or_else(|| data2.min()),
                    maybe_umax.unwrap_or_else(|| data2.max()),
                );
                let v2 = Type::ScalarValue(ScalarValueData::new(
                    data2.value | (data1.value & U32_MAX),
                    data2.unknown_mask & (data1.unknown_mask | (U32_MAX << 32)),
                    0,
                    urange2,
                ));
                (v1, v2)
            }
            (JumpWidth::W64, JumpType::Eq, Type::ScalarValue(data), Type::NullOr { id, .. })
            | (JumpWidth::W64, JumpType::Eq, Type::NullOr { id, .. }, Type::ScalarValue(data))
                if data.is_zero() =>
            {
                context.set_null(id, true);
                let zero = Type::from(0);
                (zero.clone(), zero)
            }
            (JumpWidth::W64, jump_type, Type::NullOr { id, inner }, Type::ScalarValue(data))
                if jump_type.is_strict() && data.is_zero() =>
            {
                context.set_null(id, false);
                let inner = *inner.clone();
                inner.register_resource(context);
                (inner, type2)
            }
            (JumpWidth::W64, jump_type, Type::ScalarValue(data), Type::NullOr { id, inner })
                if jump_type.is_strict() && data.is_zero() =>
            {
                context.set_null(id, false);
                let inner = *inner.clone();
                inner.register_resource(context);
                (type1, inner)
            }

            (JumpWidth::W64, JumpType::Lt, Type::ScalarValue(data1), Type::ScalarValue(data2))
            | (JumpWidth::W64, JumpType::Gt, Type::ScalarValue(data2), Type::ScalarValue(data1)) => {
                debug_assert!(data1.min() < u64::MAX);
                debug_assert!(data2.max() > 0);
                let new_max1 = std::cmp::min(data1.max(), data2.max() - 1);
                debug_assert!(data1.min() <= new_max1);
                let new_min2 = std::cmp::max(data2.min(), data1.min() + 1);
                debug_assert!(new_min2 <= data2.max());
                let new_range1 = U64Range::new(data1.min(), new_max1);
                let new_range2 = U64Range::new(new_min2, data2.max());
                (data1.update_range(new_range1).into(), data2.update_range(new_range2).into())
            }

            (JumpWidth::W64, JumpType::Le, Type::ScalarValue(data1), Type::ScalarValue(data2))
            | (JumpWidth::W64, JumpType::Ge, Type::ScalarValue(data2), Type::ScalarValue(data1)) => {
                let new_max1 = std::cmp::min(data1.max(), data2.max());
                debug_assert!(data1.min() <= new_max1);
                let new_min2 = std::cmp::max(data2.min(), data1.min());
                debug_assert!(new_min2 <= data2.max());
                let new_range1 = U64Range::new(data1.min(), new_max1);
                let new_range2 = U64Range::new(new_min2, data2.max());
                (data1.update_range(new_range1).into(), data2.update_range(new_range2).into())
            }

            (
                JumpWidth::W64,
                JumpType::Eq,
                Type::PtrToArray { id: id1, offset },
                Type::PtrToEndArray { id: id2 },
            )
            | (
                JumpWidth::W64,
                JumpType::Le,
                Type::PtrToArray { id: id1, offset },
                Type::PtrToEndArray { id: id2 },
            )
            | (
                JumpWidth::W64,
                JumpType::Ge,
                Type::PtrToEndArray { id: id1 },
                Type::PtrToArray { id: id2, offset },
            ) if id1 == id2 => {
                context.update_array_bounds(id1.clone(), *offset);
                (type1, type2)
            }
            (
                JumpWidth::W64,
                JumpType::Lt,
                Type::PtrToArray { id: id1, offset },
                Type::PtrToEndArray { id: id2 },
            )
            | (
                JumpWidth::W64,
                JumpType::Gt,
                Type::PtrToEndArray { id: id1 },
                Type::PtrToArray { id: id2, offset },
            ) if id1 == id2 => {
                context.update_array_bounds(id1.clone(), *offset + 1);
                (type1, type2)
            }
            (JumpWidth::W64, JumpType::Eq, _, _) => (type1.clone(), type1),
            _ => (type1, type2),
        };
        Ok(result)
    }

    fn match_parameter_type(
        &self,
        context: &ComputationContext,
        parameter_type: &Type,
        index: usize,
        next: &mut ComputationContext,
    ) -> Result<(), String> {
        match (parameter_type, self) {
            (Type::NullOrParameter(t), Type::ScalarValue(data))
                if data.is_known() && data.value == 0 =>
            {
                Ok(())
            }
            (Type::NullOrParameter(t), _) => self.match_parameter_type(context, t, index, next),
            (Type::ScalarValueParameter, Type::ScalarValue(data))
                if data.is_fully_initialized() =>
            {
                Ok(())
            }
            (Type::ConstPtrToMapParameter, Type::ConstPtrToMap { .. }) => Ok(()),
            (
                Type::MapKeyParameter { map_ptr_index },
                Type::PtrToMemory { offset, buffer_size, .. },
            ) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.check_memory_access(*offset, *buffer_size, 0, schema.key_size as usize)
            }
            (Type::MapKeyParameter { map_ptr_index }, Type::PtrToStack { offset }) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.stack.read_data_ptr(context.pc, *offset, schema.key_size as u64)
            }
            (
                Type::MapValueParameter { map_ptr_index },
                Type::PtrToMemory { offset, buffer_size, .. },
            ) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.check_memory_access(*offset, *buffer_size, 0, schema.value_size as usize)
            }
            (Type::MapValueParameter { map_ptr_index }, Type::PtrToStack { offset }) => {
                let schema = context.get_map_schema(*map_ptr_index)?;
                context.stack.read_data_ptr(context.pc, *offset, schema.value_size as u64)
            }
            (Type::MemoryParameter { size, .. }, Type::PtrToMemory { offset, buffer_size, .. }) => {
                let expected_size = size.size(context)?;
                let size_left = ScalarValueData::from(*buffer_size) - offset;
                if expected_size > size_left.min() {
                    return Err("out of bound read".to_string());
                }
                Ok(())
            }

            (Type::MemoryParameter { size, input, output }, Type::PtrToStack { offset }) => {
                let size = size.size(context)?;
                let buffer_end = offset.add(size);
                if !buffer_end.is_valid() {
                    Err("out of bound access".to_string())
                } else {
                    if *output {
                        next.stack.write_data_ptr(context.pc, *offset, size)?;
                    }
                    if *input {
                        context.stack.read_data_ptr(context.pc, *offset, size)?;
                    }
                    Ok(())
                }
            }
            (
                Type::StructParameter { id: id1 },
                Type::PtrToMemory { id: id2, offset, .. }
                | Type::PtrToStruct { id: id2, offset, .. },
            ) if offset.is_zero() && id1.matches(id2) => Ok(()),
            (
                Type::ReleasableParameter { id: id1, inner: inner1 },
                Type::Releasable { id: id2, inner: inner2 },
            ) if id2.has_parent(id1) => {
                if next.resources.contains(id2) {
                    inner2.match_parameter_type(context, inner1, index, next)
                } else {
                    Err(format!("Resource already released for index {index}"))
                }
            }
            (Type::ReleaseParameter { id: id1 }, Type::Releasable { id: id2, .. })
                if id2.has_parent(id1) =>
            {
                if next.resources.remove(id2) {
                    Ok(())
                } else {
                    Err(format!("{id2:?} Resource already released for index {index}"))
                }
            }
            (_, Type::Releasable { inner, .. }) => {
                inner.match_parameter_type(context, parameter_type, index, next)
            }
            (Type::AnyParameter, _) => Ok(()),

            _ => Err(format!("incorrect parameter for index {index}")),
        }
    }

    /// If this `Type` is an instance of NullOr with the given `null_id`, replace it with either
    /// 0 or the subtype, depending on `is_null`.
    fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
        match self {
            Type::NullOr { id, inner } if id == null_id => {
                if is_null {
                    *self = Type::from(0);
                } else {
                    *self = *inner.clone();
                }
            }
            _ => {}
        }
    }

    fn register_resource(&self, context: &mut ComputationContext) {
        match self {
            Type::Releasable { id, .. } => {
                context.resources.insert(id.clone());
            }
            _ => {}
        }
    }

    /// Partially compares two iterators of comparable items.
    ///
    /// This function iterates through both input iterators simultaneously and compares the
    /// corresponding elements. The comparison continues until:
    /// 1. Both iterators are exhausted and all elements were considered equal, in which case it
    ///    returns `Some(Ordering::Equal)`.
    /// 2. All pairs of corresponding elements that are not equal have the same ordering
    ///    (`Ordering::Less` or `Ordering::Greater`), in which case it returns `Some(Ordering)`
    ///    reflecting that consistent ordering.
    /// 3. One iterator is exhausted before the other, or any comparison between elements yields
    ///    `None`, or not all non-equal pairs have the same ordering, in which case it returns
    ///    `None`.
    fn compare_list<'a>(
        mut l1: impl Iterator<Item = &'a Self>,
        mut l2: impl Iterator<Item = &'a Self>,
    ) -> Option<Ordering> {
        let mut result = Ordering::Equal;
        loop {
            match (l1.next(), l2.next()) {
                (None, None) => return Some(result),
                (_, None) | (None, _) => return None,
                (Some(v1), Some(v2)) => {
                    result = associate_orderings(result, v1.partial_cmp(v2)?)?;
                }
            }
        }
    }
}

#[derive(Clone, Debug)]
pub struct FunctionSignature {
    pub args: Vec<Type>,
    pub return_value: Type,
    pub invalidate_array_bounds: bool,
}

#[derive(Debug, Default)]
pub struct CallingContext {
    /// List of map schemas of the associated maps. The maps can be accessed using the LDDW
    /// instruction with `src_reg=BPF_PSEUDO_MAP_IDX`.
    pub maps: Vec<MapSchema>,
    /// The registered external functions.
    pub helpers: HashMap<u32, FunctionSignature>,
    /// The args of the program.
    pub args: Vec<Type>,
    /// Packet type. Normally it should be either `None` or `args[0]`.
    pub packet_type: Option<Type>,
}

impl CallingContext {
    pub fn register_map(&mut self, schema: MapSchema) -> usize {
        let index = self.maps.len();
        self.maps.push(schema);
        index
    }
    pub fn set_helpers(&mut self, helpers: HashMap<u32, FunctionSignature>) {
        self.helpers = helpers;
    }
    pub fn set_args(&mut self, args: &[Type]) {
        debug_assert!(args.len() <= 5);
        self.args = args.to_vec();
    }
    pub fn set_packet_type(&mut self, packet_type: Type) {
        self.packet_type = Some(packet_type);
    }
}
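
// A minimal usage sketch, assuming `schema` is a previously built `MapSchema`:
//
//     let mut calling_context = CallingContext::default();
//     let map_index = calling_context.register_map(schema);
//     calling_context.set_args(&[Type::UNKNOWN_SCALAR]);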

#[derive(Debug, PartialEq, Clone)]
pub struct StructAccess {
    pub pc: ProgramCounter,

    // Memory id of the struct being accessed.
    pub memory_id: MemoryId,

    // Offset of the field being loaded.
    pub field_offset: usize,

    // Indicates that this is a 32-bit pointer load. These instructions must be remapped.
    pub is_32_bit_ptr_load: bool,
}

#[derive(Debug, Clone)]
pub struct VerifiedEbpfProgram {
    pub(crate) code: Vec<EbpfInstruction>,
    pub(crate) args: Vec<Type>,
    pub(crate) struct_access_instructions: Vec<StructAccess>,
    pub(crate) maps: Vec<MapSchema>,
}

impl VerifiedEbpfProgram {
    // Converts the program to raw code. Can be used only when the program doesn't access any
    // structs or maps.
    pub fn to_code(self) -> Vec<EbpfInstruction> {
        debug_assert!(self.struct_access_instructions.is_empty());
        debug_assert!(self.maps.is_empty());
        self.code
    }

    pub fn code(&self) -> &[EbpfInstruction] {
        &self.code
    }

    pub fn struct_access_instructions(&self) -> &[StructAccess] {
        &self.struct_access_instructions
    }

    pub fn from_verified_code(
        code: Vec<EbpfInstruction>,
        args: Vec<Type>,
        struct_access_instructions: Vec<StructAccess>,
        maps: Vec<MapSchema>,
    ) -> Self {
        Self { code, args, struct_access_instructions, maps }
    }

    pub fn maps(&self) -> &[MapSchema] {
        &self.maps
    }
}

/// Verifies the given code against the types of the parameters and the registered external
/// functions. The returned `VerifiedEbpfProgram` must be linked before it can be executed.
pub fn verify_program(
    code: Vec<EbpfInstruction>,
    calling_context: CallingContext,
    logger: &mut dyn VerifierLogger,
) -> Result<VerifiedEbpfProgram, EbpfError> {
    if code.len() > BPF_MAX_INSTS {
        return error_and_log(logger, "ebpf program too long");
    }

    let mut context = ComputationContext::default();
    for (i, t) in calling_context.args.iter().enumerate() {
        // The parameter registers are r1 to r5.
        context.set_reg((i + 1) as u8, t.clone()).map_err(EbpfError::ProgramVerifyError)?;
    }
    let states = vec![context];
    let mut verification_context = VerificationContext {
        calling_context,
        logger,
        states,
        code: &code,
        counter: 0,
        iteration: 0,
        terminating_contexts: Default::default(),
        struct_access_instructions: Default::default(),
    };
    while let Some(mut context) = verification_context.states.pop() {
        if let Some(terminating_contexts) =
            verification_context.terminating_contexts.get(&context.pc)
        {
            // Check whether there exists a context that terminates and proves that this context
            // also terminates.
            if let Some(ending_context) =
                terminating_contexts.iter().find(|c| c.computation_context >= context)
            {
                // One such context has been found; this proves that the current context
                // terminates. If the context has a parent, register the data dependencies and
                // try to terminate it.
                if let Some(parent) = context.parent.take() {
                    parent.dependencies.lock().push(ending_context.dependencies.clone());
                    if let Some(parent) = Arc::into_inner(parent) {
                        parent
                            .terminate(&mut verification_context)
                            .map_err(EbpfError::ProgramVerifyError)?;
                    }
                }
                continue;
            }
        }
        if verification_context.iteration > 10 * BPF_MAX_INSTS {
            return error_and_log(verification_context.logger, "bpf byte code does not terminate");
        }
        if context.pc >= code.len() {
            return error_and_log(verification_context.logger, "pc out of bounds");
        }
        let visit_result = context.visit(&mut verification_context, code[context.pc]);
        match visit_result {
            Err(message) => {
                let message = format!("at PC {}: {}", context.pc, message);
                return error_and_log(verification_context.logger, message);
            }
            _ => {}
        }
        verification_context.iteration += 1;
    }

    let struct_access_instructions =
        verification_context.struct_access_instructions.into_values().collect::<Vec<_>>();
    let CallingContext { maps, args, .. } = verification_context.calling_context;
    Ok(VerifiedEbpfProgram { code, struct_access_instructions, maps, args })
}
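
// A minimal invocation sketch, assuming `code` holds the instructions of the
// program to verify and no maps, helpers or typed arguments are needed:
//
//     let program = verify_program(code, CallingContext::default(), &mut NullVerifierLogger)?;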

struct VerificationContext<'a> {
    /// The type information for the program arguments and the registered functions.
    calling_context: CallingContext,
    /// The logger to use.
    logger: &'a mut dyn VerifierLogger,
    /// The `ComputationContext`s yet to be validated.
    states: Vec<ComputationContext>,
    /// The program being analyzed.
    code: &'a [EbpfInstruction],
    /// A counter used to generate unique ids for memory buffers and maps.
    counter: u64,
    /// The current iteration of the verifier. Used to ensure termination by limiting the number
    /// of iterations before bailing out.
    iteration: usize,
    /// Keeps track of the contexts that terminate at a given pc. The contexts in each list are
    /// pairwise incomparable: each time a bigger context is computed, the smaller ones are
    /// removed from the list.
    terminating_contexts: BTreeMap<ProgramCounter, Vec<TerminatingContext>>,
    /// The current set of struct access instructions that will need to be updated when the
    /// program is linked. This is also used to ensure that a given instruction always loads the
    /// same field. If this is not the case, the verifier will reject the program.
    struct_access_instructions: HashMap<ProgramCounter, StructAccess>,
}

impl<'a> VerificationContext<'a> {
    fn next_id(&mut self) -> u64 {
        let id = self.counter;
        self.counter += 1;
        id
    }

    /// Registers an instruction that loads or stores a struct field. These instructions will need
    /// to be updated later when the program is linked.
    fn register_struct_access(&mut self, struct_access: StructAccess) -> Result<(), String> {
        match self.struct_access_instructions.entry(struct_access.pc) {
            std::collections::hash_map::Entry::Vacant(entry) => {
                entry.insert(struct_access);
            }
            std::collections::hash_map::Entry::Occupied(entry) => {
                if *entry.get() != struct_access {
                    return Err("Inconsistent struct field access".to_string());
                }
            }
        }
        Ok(())
    }
}

const STACK_ELEMENT_SIZE: usize = std::mem::size_of::<u64>();
const STACK_MAX_INDEX: usize = BPF_STACK_SIZE / STACK_ELEMENT_SIZE;

/// An offset inside the stack. The offset is measured from the end of the stack, downward.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct StackOffset(ScalarValueData);

impl Default for StackOffset {
    fn default() -> Self {
        Self(BPF_STACK_SIZE.into())
    }
}

impl StackOffset {
    /// Whether the current offset is valid.
    fn is_valid(&self) -> bool {
        self.0.is_known() && self.0.value <= (BPF_STACK_SIZE as u64)
    }

    /// The value of the register.
    fn reg(&self) -> ScalarValueData {
        self.0
    }

    /// The index into the stack array this offset points to. Can be called only if `is_valid()`
    /// is true.
    fn array_index(&self) -> usize {
        debug_assert!(self.is_valid());
        usize::try_from(self.0.value).unwrap() / STACK_ELEMENT_SIZE
    }

    /// The offset inside the aligned u64 in the stack.
    fn sub_index(&self) -> usize {
        debug_assert!(self.is_valid());
        usize::try_from(self.0.value).unwrap() % STACK_ELEMENT_SIZE
    }

    fn add<T: Into<ScalarValueData>>(self, rhs: T) -> Self {
        Self(self.0 + rhs)
    }
}
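
// For instance, the default offset points one past the last addressable stack
// byte, so the top 8-byte slot is reached with a negative offset, as `Field`
// offsets are (a sketch, assuming `i16` converts into `ScalarValueData` as it
// does for field offsets):
//
//     let top_slot = StackOffset::default().add(-8i16);
//     assert!(top_slot.is_valid());
//     assert!(!StackOffset::default().add(1i16).is_valid());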

/// The state of the stack.
#[derive(Clone, Debug, Default, PartialEq)]
struct Stack {
    /// The known types, indexed by aligned u64 slot. A `BTreeMap` is used so that iteration is
    /// ordered by index, which the `PartialOrd` implementation below relies on.
    data: BTreeMap<usize, Type>,
}

impl Stack {
    /// Replaces all instances of the NullOr type with the given `null_id` with either 0 or the
    /// subtype, depending on `is_null`.
    fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
        for (_, t) in self.data.iter_mut() {
            t.set_null(null_id, is_null);
        }
    }

    fn get(&self, index: usize) -> &Type {
        self.data.get(&index).unwrap_or(&Type::UNINITIALIZED)
    }

    fn set(&mut self, index: usize, t: Type) {
        if t == Type::UNINITIALIZED {
            self.data.remove(&index);
        } else {
            self.data.insert(index, t);
        }
    }

    fn extract_sub_value(value: u64, offset: usize, byte_count: usize) -> u64 {
        NativeEndian::read_uint(&value.as_bytes()[offset..], byte_count)
    }

    fn insert_sub_value(mut original: u64, value: u64, width: DataWidth, offset: usize) -> u64 {
        let byte_count = width.bytes();
        let original_buf = original.as_mut_bytes();
        let value_buf = value.as_bytes();
        original_buf[offset..(byte_count + offset)].copy_from_slice(&value_buf[..byte_count]);
        original
    }
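
    // For example, on a little-endian host (and assuming a `DataWidth::U16`
    // variant), the sub-value helpers behave as follows:
    //
    //     // Bytes 2..4 of the value are 0x66, 0x55, read back as 0x5566.
    //     assert_eq!(Stack::extract_sub_value(0x1122_3344_5566_7788, 2, 2), 0x5566);
    //     // Writing 0xAABB over those same bytes replaces them in place.
    //     assert_eq!(
    //         Stack::insert_sub_value(0x1122_3344_5566_7788, 0xAABB, DataWidth::U16, 2),
    //         0x1122_3344_AABB_7788,
    //     );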

    fn write_data_ptr(
        &mut self,
        pc: ProgramCounter,
        mut offset: StackOffset,
        bytes: u64,
    ) -> Result<(), String> {
        for _ in 0..bytes {
            self.store(offset, Type::UNKNOWN_SCALAR, DataWidth::U8)?;
            offset = offset.add(1);
        }
        Ok(())
    }

    fn read_data_ptr(
        &self,
        pc: ProgramCounter,
        offset: StackOffset,
        bytes: u64,
    ) -> Result<(), String> {
        let read_element =
            |index: usize, start_offset: usize, end_offset: usize| -> Result<(), String> {
                match self.get(index) {
                    Type::ScalarValue(data) => {
                        debug_assert!(end_offset > start_offset);
                        let unwritten_bits = Self::extract_sub_value(
                            data.unwritten_mask,
                            start_offset,
                            end_offset - start_offset,
                        );
                        if unwritten_bits == 0 {
                            Ok(())
                        } else {
                            Err("reading unwritten value from the stack".to_string())
                        }
                    }
                    _ => Err("invalid read from the stack".to_string()),
                }
            };
        if bytes == 0 {
            return Ok(());
        }

        if bytes as usize > BPF_STACK_SIZE {
            return Err("stack overflow".to_string());
        }

        if !offset.is_valid() {
            return Err("invalid stack offset".to_string());
        }

        let end_offset = offset.add(bytes);
        if !end_offset.is_valid() {
            return Err("stack overflow".to_string());
        }

        // Handle the case where all the data is contained in a single element excluding the last
        // byte (the case when the read ends at an element edge, i.e. `end_offset.sub_index()==0`,
        // is covered by the default path below).
        if offset.array_index() == end_offset.array_index() {
            return read_element(offset.array_index(), offset.sub_index(), end_offset.sub_index());
        }

        // Handle the first element, which might be partial.
        read_element(offset.array_index(), offset.sub_index(), STACK_ELEMENT_SIZE)?;

        // Handle the last element, which might be partial.
        if end_offset.sub_index() != 0 {
            read_element(end_offset.array_index(), 0, end_offset.sub_index())?;
        }

        // Handle any full element between the beginning and the end.
        for i in (offset.array_index() + 1)..end_offset.array_index() {
            read_element(i, 0, STACK_ELEMENT_SIZE)?;
        }

        Ok(())
    }

    fn store(&mut self, offset: StackOffset, value: Type, width: DataWidth) -> Result<(), String> {
        if !offset.is_valid() {
            return Err("out of bounds store".to_string());
        }
        if offset.sub_index() % width.bytes() != 0 {
            return Err("misaligned access".to_string());
        }

        let index = offset.array_index();
        if width == DataWidth::U64 {
            self.set(index, value);
        } else {
            match value {
                Type::ScalarValue(data) => {
                    let old_data = match self.get(index) {
                        Type::ScalarValue(data) => *data,
                        _ => {
                            // The value in the stack is not a scalar. Let's treat it as a scalar
                            // value with no written bits.
                            ScalarValueData::UNINITIALIZED
                        }
                    };
                    let sub_index = offset.sub_index();
                    let value =
                        Self::insert_sub_value(old_data.value, data.value, width, sub_index);
                    let unknown_mask = Self::insert_sub_value(
                        old_data.unknown_mask,
                        data.unknown_mask,
                        width,
                        sub_index,
                    );
                    let unwritten_mask = Self::insert_sub_value(
                        old_data.unwritten_mask,
                        data.unwritten_mask,
                        width,
                        sub_index,
                    );
                    let urange = U64Range::compute_range_for_bytes_swap(
                        old_data.urange,
                        data.urange,
                        sub_index,
                        0,
                        width.bytes(),
                    );
                    self.set(
                        index,
                        Type::ScalarValue(ScalarValueData::new(
                            value,
                            unknown_mask,
                            unwritten_mask,
                            urange,
                        )),
                    );
                }
                _ => {
                    return Err("cannot store part of a non scalar value on the stack".to_string());
                }
            }
        }
        Ok(())
    }

    fn load(&self, offset: StackOffset, width: DataWidth) -> Result<Type, String> {
        if !offset.is_valid() {
            return Err("out of bounds load".to_string());
        }
        if offset.array_index() >= STACK_MAX_INDEX {
            return Err("out of bounds load".to_string());
        }
        if offset.sub_index() % width.bytes() != 0 {
            return Err("misaligned access".to_string());
        }

        let index = offset.array_index();
        let loaded_type = self.get(index).clone();
        if width == DataWidth::U64 {
            Ok(loaded_type)
        } else {
            match loaded_type {
                Type::ScalarValue(data) => {
                    let sub_index = offset.sub_index();
                    let value = Self::extract_sub_value(data.value, sub_index, width.bytes());
                    let unknown_mask =
                        Self::extract_sub_value(data.unknown_mask, sub_index, width.bytes());
                    let unwritten_mask =
                        Self::extract_sub_value(data.unwritten_mask, sub_index, width.bytes());
                    let urange = U64Range::compute_range_for_bytes_swap(
                        0.into(),
                        data.urange,
                        0,
                        sub_index,
                        width.bytes(),
                    );
                    Ok(Type::ScalarValue(ScalarValueData::new(
                        value,
                        unknown_mask,
                        unwritten_mask,
                        urange,
                    )))
                }
                _ => Err(format!("incorrect load of {} bytes", width.bytes())),
            }
        }
    }
}

/// Two stacks are ordered with `s1 > s2` if a proof that a program in a state `s1` finishes is
/// also a proof that a program in a state `s2` finishes.
impl PartialOrd for Stack {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        let mut result = Ordering::Equal;
        let mut data_iter1 = self.data.iter().peekable();
        let mut data_iter2 = other.data.iter().peekable();
        loop {
            let k1 = data_iter1.peek().map(|(k, _)| *k);
            let k2 = data_iter2.peek().map(|(k, _)| *k);
            let k = match (k1, k2) {
                (None, None) => return Some(result),
                (Some(k), None) => {
                    data_iter1.next();
                    *k
                }
                (None, Some(k)) => {
                    data_iter2.next();
                    *k
                }
                (Some(k1), Some(k2)) => {
                    if k1 <= k2 {
                        data_iter1.next();
                    }
                    if k2 <= k1 {
                        data_iter2.next();
                    }
                    *std::cmp::min(k1, k2)
                }
            };
            result = associate_orderings(result, self.get(k).partial_cmp(other.get(k))?)?;
        }
    }
}

macro_rules! bpf_log {
    ($context:ident, $verification_context:ident, $($msg:tt)*) => {
        let prefix = format!("{}: ({:02x})", $context.pc, $verification_context.code[$context.pc].code());
        let suffix = format!($($msg)*);
        $verification_context.logger.log(format!("{prefix} {suffix}").as_bytes());
    }
}

/// The state of the computation as known by the verifier at a given point in time.
#[derive(Debug, Default)]
struct ComputationContext {
    /// The program counter.
    pc: ProgramCounter,
    /// Registers 0 to 9.
    registers: [Type; GENERAL_REGISTER_COUNT as usize],
    /// The state of the stack.
    stack: Stack,
    /// The dynamically known bounds of buffers indexed by their ids.
    array_bounds: BTreeMap<MemoryId, u64>,
    /// The currently allocated resources.
    resources: HashSet<MemoryId>,
    /// The previous context in the computation.
    parent: Option<Arc<ComputationContext>>,
    /// The data dependencies of this context. This is used to broaden a known ending context to
    /// help cut computation branches.
    dependencies: Mutex<Vec<DataDependencies>>,
}

impl Clone for ComputationContext {
    fn clone(&self) -> Self {
        Self {
            pc: self.pc,
            registers: self.registers.clone(),
            stack: self.stack.clone(),
            array_bounds: self.array_bounds.clone(),
            resources: self.resources.clone(),
            parent: self.parent.clone(),
            // The dependencies are erased, as they must always be used on the same instance of
            // the context.
            dependencies: Default::default(),
        }
    }
}

/// `parent` and `dependencies` are ignored for the comparison, as they do not matter for
/// termination.
impl PartialEq for ComputationContext {
    fn eq(&self, other: &Self) -> bool {
        self.pc == other.pc
            && self.registers == other.registers
            && self.stack == other.stack
            && self.array_bounds == other.array_bounds
    }
}

impl ComputationContext {
    /// Replaces all instances of the NullOr type with the given `null_id` with either 0 or the
    /// subtype, depending on `is_null`.
    fn set_null(&mut self, null_id: &MemoryId, is_null: bool) {
        for i in 0..self.registers.len() {
            self.registers[i].set_null(null_id, is_null);
        }
        self.stack.set_null(null_id, is_null);
    }

    fn reg(&self, index: Register) -> Result<Type, String> {
        if index >= REGISTER_COUNT {
            return Err(format!("R{index} is invalid"));
        }
        if index < GENERAL_REGISTER_COUNT {
            Ok(self.registers[index as usize].clone())
        } else {
            Ok(Type::PtrToStack { offset: StackOffset::default() })
        }
    }

    fn set_reg(&mut self, index: Register, reg_type: Type) -> Result<(), String> {
        if index >= GENERAL_REGISTER_COUNT {
            return Err(format!("R{index} is invalid"));
        }
        self.registers[index as usize] = reg_type;
        Ok(())
    }

    fn update_array_bounds(&mut self, id: MemoryId, new_bound: ScalarValueData) {
        let new_bound_min = new_bound.min();
        self.array_bounds
            .entry(id)
            .and_modify(|v| *v = std::cmp::max(*v, new_bound_min))
            .or_insert(new_bound_min);
    }

    fn get_map_schema(&self, argument: u8) -> Result<MapSchema, String> {
        match self.reg(argument + 1)? {
            Type::ConstPtrToMap { schema, .. } => Ok(schema),
            _ => Err(format!("No map found at argument {argument}")),
        }
    }

    fn next(&self) -> Result<Self, String> {
        let parent = Some(Arc::new(self.clone()));
        self.jump_with_offset(0, parent)
    }

    /// Returns a new `ComputationContext` where the pc has jumped by `offset + 1`. In particular,
    /// the next instruction is reached with `jump_with_offset(0)`.
    fn jump_with_offset(&self, offset: i16, parent: Option<Arc<Self>>) -> Result<Self, String> {
        let pc = self
            .pc
            .checked_add_signed(offset.into())
            .and_then(|v| v.checked_add_signed(1))
            .ok_or_else(|| "jump outside of program".to_string())?;
        let result = Self {
            pc,
            registers: self.registers.clone(),
            stack: self.stack.clone(),
            array_bounds: self.array_bounds.clone(),
            resources: self.resources.clone(),
            parent,
            dependencies: Default::default(),
        };
        Ok(result)
    }
1357
1358    fn check_memory_access(
1359        &self,
1360        dst_offset: ScalarValueData,
1361        dst_buffer_size: u64,
1362        instruction_offset: i16,
1363        width: usize,
1364    ) -> Result<(), String> {
1365        let memory_range = dst_offset.urange + instruction_offset + U64Range::new(0, width as u64);
1366        if memory_range.max > dst_buffer_size {
1367            return Err("out of bound access".to_string());
1368        }
1369        Ok(())
1370    }
1371
1372    fn store_memory(
1373        &mut self,
1374        context: &mut VerificationContext<'_>,
1375        addr: &Type,
1376        field: Field,
1377        value: Type,
1378    ) -> Result<(), String> {
1379        let addr = addr.inner(self)?;
1380        match *addr {
1381            Type::PtrToStack { offset } => {
1382                let offset_sum = offset.add(field.offset);
1383                return self.stack.store(offset_sum, value, field.width);
1384            }
1385            Type::PtrToMemory { offset, buffer_size, .. } => {
1386                self.check_memory_access(offset, buffer_size, field.offset, field.width.bytes())?;
1387            }
1388            Type::PtrToStruct { ref id, offset, ref descriptor, .. } => {
1389                let field_desc = descriptor
1390                    .find_field(offset, field)
1391                    .ok_or_else(|| "incorrect store".to_string())?;
1392
1393                if !matches!(field_desc.field_type, FieldType::MutableScalar { .. }) {
1394                    return Err("store to a read-only field".to_string());
1395                }
1396
1397                context.register_struct_access(StructAccess {
1398                    pc: self.pc,
1399                    memory_id: id.clone(),
1400                    field_offset: field_desc.offset,
1401                    is_32_bit_ptr_load: false,
1402                })?;
1403            }
1404            Type::PtrToArray { ref id, offset } => {
1405                self.check_memory_access(
1406                    offset,
1407                    *self.array_bounds.get(id).unwrap_or(&0),
1408                    field.offset,
1409                    field.width.bytes(),
1410                )?;
1411            }
1412            _ => return Err("incorrect store".to_string()),
1413        }
1414
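        // Only fully initialized scalars may be written through a pointer; spilling other types
        // (e.g. pointers) is only supported on the stack, which returned early above.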
1415        match value {
1416            Type::ScalarValue(data) if data.is_fully_initialized() => {}
1417            // Private data should not be leaked.
1418            _ => return Err("incorrect store".to_string()),
1419        }
1420        Ok(())
1421    }
1422
1423    fn load_memory(
1424        &self,
1425        context: &mut VerificationContext<'_>,
1426        addr: &Type,
1427        field: Field,
1428    ) -> Result<Type, String> {
1429        let addr = addr.inner(self)?;
1430        match *addr {
1431            Type::PtrToStack { offset } => {
1432                let offset_sum = offset.add(field.offset);
1433                self.stack.load(offset_sum, field.width)
1434            }
1435            Type::PtrToMemory { ref id, offset, buffer_size, .. } => {
1436                self.check_memory_access(offset, buffer_size, field.offset, field.width.bytes())?;
1437                Ok(Type::UNKNOWN_SCALAR)
1438            }
1439            Type::PtrToStruct { ref id, offset, ref descriptor, .. } => {
1440                let field_desc = descriptor
1441                    .find_field(offset, field)
1442                    .ok_or_else(|| "incorrect load".to_string())?;
1443
1444                let (return_type, is_32_bit_ptr_load) = match &field_desc.field_type {
1445                    FieldType::Scalar { .. } | FieldType::MutableScalar { .. } => {
1446                        (Type::UNKNOWN_SCALAR, false)
1447                    }
1448                    FieldType::PtrToArray { id: array_id, is_32_bit } => (
1449                        Type::PtrToArray { id: array_id.prepended(id.clone()), offset: 0.into() },
1450                        *is_32_bit,
1451                    ),
1452                    FieldType::PtrToEndArray { id: array_id, is_32_bit } => {
1453                        (Type::PtrToEndArray { id: array_id.prepended(id.clone()) }, *is_32_bit)
1454                    }
1455                    FieldType::PtrToMemory { id: memory_id, buffer_size, is_32_bit } => (
1456                        Type::PtrToMemory {
1457                            id: memory_id.prepended(id.clone()),
1458                            offset: 0.into(),
1459                            buffer_size: *buffer_size as u64,
1460                        },
1461                        *is_32_bit,
1462                    ),
1463                    FieldType::NullablePtrToMemory { id: memory_id, buffer_size, is_32_bit } => {
1464                        let id = memory_id.prepended(id.clone());
1465                        (
1466                            Type::NullOr {
1467                                id: id.clone(),
1468                                inner: Box::new(Type::PtrToMemory {
1469                                    id,
1470                                    offset: 0.into(),
1471                                    buffer_size: *buffer_size as u64,
1472                                }),
1473                            },
1474                            *is_32_bit,
1475                        )
1476                    }
1477                };
1478
1479                context.register_struct_access(StructAccess {
1480                    pc: self.pc,
1481                    memory_id: id.clone(),
1482                    field_offset: field_desc.offset,
1483                    is_32_bit_ptr_load,
1484                })?;
1485
1486                Ok(return_type)
1487            }
1488            Type::PtrToArray { ref id, offset } => {
1489                self.check_memory_access(
1490                    offset,
1491                    *self.array_bounds.get(id).unwrap_or(&0),
1492                    field.offset,
1493                    field.width.bytes(),
1494                )?;
1495                Ok(Type::UNKNOWN_SCALAR)
1496            }
1497            _ => Err("incorrect load".to_string()),
1498        }
1499    }
1500
1501    /// Given the `return_value` in a method signature, returns the concrete type to use,
1502    /// updating the `next` context if needed.
1503    ///
1504    /// `maybe_null` is true if the computed value will be a descendant of a `NullOr` type.
1507    fn resolve_return_value(
1508        &self,
1509        verification_context: &mut VerificationContext<'_>,
1510        return_value: &Type,
1511        next: &mut ComputationContext,
1512        maybe_null: bool,
1513    ) -> Result<Type, String> {
1514        match return_value {
1515            Type::AliasParameter { parameter_index } => self.reg(parameter_index + 1),
1516            Type::ReleasableParameter { id, inner } => {
1517                let id = MemoryId::from(verification_context.next_id()).prepended(id.clone());
1518                if !maybe_null {
1519                    next.resources.insert(id.clone());
1520                }
1521                Ok(Type::Releasable {
1522                    id,
1523                    inner: Box::new(self.resolve_return_value(
1524                        verification_context,
1525                        inner,
1526                        next,
1527                        maybe_null,
1528                    )?),
1529                })
1530            }
1531            Type::NullOrParameter(t) => {
1532                let id = verification_context.next_id();
1533                Ok(Type::NullOr {
1534                    id: id.into(),
1535                    inner: Box::new(self.resolve_return_value(
1536                        verification_context,
1537                        t,
1538                        next,
1539                        true,
1540                    )?),
1541                })
1542            }
1543            Type::MapValueParameter { map_ptr_index } => {
1544                let schema = self.get_map_schema(*map_ptr_index)?;
1545                let id = verification_context.next_id();
1546                Ok(Type::PtrToMemory {
1547                    id: id.into(),
1548                    offset: 0.into(),
1549                    buffer_size: schema.value_size as u64,
1550                })
1551            }
1552            Type::MemoryParameter { size, .. } => {
1553                let buffer_size = size.size(self)?;
1554                let id = verification_context.next_id();
1555                Ok(Type::PtrToMemory { id: id.into(), offset: 0.into(), buffer_size })
1556            }
1557            t => Ok(t.clone()),
1558        }
1559    }
1560
1561    fn compute_source(&self, src: Source) -> Result<Type, String> {
1562        match src {
1563            Source::Reg(reg) => self.reg(reg),
1564            Source::Value(v) => Ok(v.into()),
1565        }
1566    }
1567
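    /// Applies the scalar operation `op` to `op1` and `op2`, preserving pointer types where the
    /// operation is meaningful on pointers: pointer +/- scalar keeps the pointer type with an
    /// updated offset, and subtracting two pointers into the same buffer yields a scalar. Any
    /// other combination degrades to the default, uninitialized type.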
1568    fn apply_computation(
1569        context: &ComputationContext,
1570        op1: Type,
1571        op2: Type,
1572        alu_type: AluType,
1573        op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
1574    ) -> Result<Type, String> {
1575        let result: Type = match (alu_type, op1.inner(context)?, op2.inner(context)?) {
1576            (_, Type::ScalarValue(data1), Type::ScalarValue(data2)) => op(*data1, *data2).into(),
1577            (
1578                AluType::Add,
1579                Type::ScalarValue(_),
1580                Type::PtrToStack { .. } | Type::PtrToMemory { .. } | Type::PtrToStruct { .. },
1581            ) => {
1582                return Self::apply_computation(context, op2, op1, alu_type, op);
1583            }
1584            (alu_type, Type::PtrToStack { offset: x }, Type::ScalarValue(data))
1585                if alu_type.is_ptr_compatible() =>
1586            {
1587                Type::PtrToStack { offset: run_on_stack_offset(*x, |x| op(x, *data)) }
1588            }
1589            (
1590                alu_type,
1591                Type::PtrToMemory { id, offset: x, buffer_size },
1592                Type::ScalarValue(data),
1593            ) if alu_type.is_ptr_compatible() => {
1594                let offset = op(*x, *data);
1595                Type::PtrToMemory { id: id.clone(), offset, buffer_size: *buffer_size }
1596            }
1597            (
1598                alu_type,
1599                Type::PtrToStruct { id, offset: x, descriptor },
1600                Type::ScalarValue(data),
1601            ) if alu_type.is_ptr_compatible() => {
1602                let offset = op(*x, *data);
1603                Type::PtrToStruct { id: id.clone(), offset, descriptor: descriptor.clone() }
1604            }
1605            (AluType::Add, Type::PtrToArray { id, offset: x }, Type::ScalarValue(data)) => {
1606                let offset = x.checked_add(*data).ok_or_else(|| "pointer overflow".to_string())?;
1607                Type::PtrToArray { id: id.clone(), offset }
1608            }
1609            (AluType::Sub, Type::PtrToArray { id, offset: x }, Type::ScalarValue(data)) => {
1610                let offset = x.checked_sub(*data).ok_or_else(|| "pointer underflow".to_string())?;
1611                Type::PtrToArray { id: id.clone(), offset }
1612            }
1613            (
1614                AluType::Sub,
1615                Type::PtrToMemory { id: id1, offset: x1, .. },
1616                Type::PtrToMemory { id: id2, offset: x2, .. },
1617            )
1618            | (
1619                AluType::Sub,
1620                Type::PtrToStruct { id: id1, offset: x1, .. },
1621                Type::PtrToStruct { id: id2, offset: x2, .. },
1622            )
1623            | (
1624                AluType::Sub,
1625                Type::PtrToArray { id: id1, offset: x1 },
1626                Type::PtrToArray { id: id2, offset: x2 },
1627            ) if id1 == id2 => Type::from(op(*x1, *x2)),
1628            (AluType::Sub, Type::PtrToStack { offset: x1 }, Type::PtrToStack { offset: x2 }) => {
1629                Type::from(op(x1.reg(), x2.reg()))
1630            }
1631            (
1632                AluType::Sub,
1633                Type::PtrToArray { id: id1, .. },
1634                Type::PtrToEndArray { id: id2, .. },
1635            )
1636            | (
1637                AluType::Sub,
1638                Type::PtrToEndArray { id: id1, .. },
1639                Type::PtrToArray { id: id2, .. },
1640            ) if id1 == id2 => Type::UNKNOWN_SCALAR,
1641            _ => Type::default(),
1642        };
1643        Ok(result)
1644    }
1645
1646    fn alu(
1647        &mut self,
1648        op_name: Option<&str>,
1649        verification_context: &mut VerificationContext<'_>,
1650        dst: Register,
1651        src: Source,
1652        alu_type: AluType,
1653        op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
1654    ) -> Result<(), String> {
1655        if let Some(op_name) = op_name {
1656            bpf_log!(
1657                self,
1658                verification_context,
1659                "{op_name} {}, {}",
1660                display_register(dst),
1661                display_source(src)
1662            );
1663        }
1664        let op1 = self.reg(dst)?;
1665        let op2 = self.compute_source(src)?;
1666        let result = Self::apply_computation(self, op1, op2, alu_type, op)?;
1667        let mut next = self.next()?;
1668        next.set_reg(dst, result)?;
1669        verification_context.states.push(next);
1670        Ok(())
1671    }
1672
1673    fn log_atomic_operation(
1674        &mut self,
1675        op_name: &str,
1676        verification_context: &mut VerificationContext<'_>,
1677        fetch: bool,
1678        dst: Register,
1679        offset: i16,
1680        src: Register,
1681    ) {
1682        bpf_log!(
1683            self,
1684            verification_context,
1685            "lock {}{} [{}{}], {}",
1686            if fetch { "fetch " } else { "" },
1687            op_name,
1688            display_register(dst),
1689            print_offset(offset),
1690            display_register(src),
1691        );
1692    }
1693
1694    fn raw_atomic_operation(
1695        &mut self,
1696        op_name: &str,
1697        verification_context: &mut VerificationContext<'_>,
1698        width: DataWidth,
1699        fetch: bool,
1700        dst: Register,
1701        offset: i16,
1702        src: Register,
1703        op: impl FnOnce(&ComputationContext, Type, Type) -> Result<Type, String>,
1704    ) -> Result<(), String> {
1705        self.log_atomic_operation(op_name, verification_context, fetch, dst, offset, src);
1706        let addr = self.reg(dst)?;
1707        let value = self.reg(src)?;
1708        let field = Field::new(offset, width);
1709        let loaded_type = self.load_memory(verification_context, &addr, field)?;
1710        let result = op(self, loaded_type.clone(), value)?;
1711        let mut next = self.next()?;
1712        next.store_memory(verification_context, &addr, field, result)?;
1713        if fetch {
1714            next.set_reg(src, loaded_type)?;
1715        }
1716        verification_context.states.push(next);
1717        Ok(())
1718    }
1719
1720    fn atomic_operation(
1721        &mut self,
1722        op_name: &str,
1723        verification_context: &mut VerificationContext<'_>,
1724        width: DataWidth,
1725        fetch: bool,
1726        dst: Register,
1727        offset: i16,
1728        src: Register,
1729        alu_type: AluType,
1730        op: impl Fn(ScalarValueData, ScalarValueData) -> ScalarValueData,
1731    ) -> Result<(), String> {
1732        self.raw_atomic_operation(
1733            op_name,
1734            verification_context,
1735            width,
1736            fetch,
1737            dst,
1738            offset,
1739            src,
1740            |context: &ComputationContext, v1: Type, v2: Type| {
1741                Self::apply_computation(context, v1, v2, alu_type, op)
1742            },
1743        )
1744    }
1745
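    /// Verifies a `cmpxchg`: the value at `[dst + offset]` is compared with r0 and, on equality,
    /// `src` is stored there. Unless `compute_branch` decides the comparison statically, both
    /// outcomes are explored; in each branch r0 receives the loaded value, constrained by the
    /// outcome of the comparison.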
1746    fn raw_atomic_cmpxchg(
1747        &mut self,
1748        op_name: &str,
1749        verification_context: &mut VerificationContext<'_>,
1750        dst: Register,
1751        offset: i16,
1752        src: Register,
1753        jump_width: JumpWidth,
1754        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
1755    ) -> Result<(), String> {
1756        self.log_atomic_operation(op_name, verification_context, true, dst, offset, src);
1757        let width = match jump_width {
1758            JumpWidth::W32 => DataWidth::U32,
1759            JumpWidth::W64 => DataWidth::U64,
1760        };
1761        let addr = self.reg(dst)?;
1762        let field = Field::new(offset, width);
1763        let dst = self.load_memory(verification_context, &addr, field)?;
1764        let value = self.reg(src)?;
1765        let r0 = self.reg(0)?;
1766        let branch = self.compute_branch(jump_width, &dst, &r0, op)?;
1767        // r0 = dst
1768        if branch.unwrap_or(true) {
1769            let mut next = self.next()?;
1770            let (dst, r0) =
1771                Type::constraint(&mut next, JumpType::Eq, jump_width, dst.clone(), r0.clone())?;
1772            next.set_reg(0, dst)?;
1773            next.store_memory(verification_context, &addr, field, value)?;
1774            verification_context.states.push(next);
1775        }
1776        // r0 != dst
1777        if !branch.unwrap_or(false) {
1778            let mut next = self.next()?;
1779            let (dst, r0) = Type::constraint(&mut next, JumpType::Ne, jump_width, dst, r0)?;
1780            next.set_reg(0, dst.clone())?;
1781            next.store_memory(verification_context, &addr, field, dst)?;
1782            verification_context.states.push(next);
1783        }
1784
1785        Ok(())
1786    }
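
    /// Applies a byte-order conversion (`be`/`le`) of the given `width` to `dst`. The unknown and
    /// unwritten bit masks are swapped together with the value so that bit-level tracking
    /// survives the conversion, while the value range is widened to the full u64 range.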
1787    fn endianness<BO: ByteOrder>(
1788        &mut self,
1789        op_name: &str,
1790        verification_context: &mut VerificationContext<'_>,
1791        dst: Register,
1792        width: DataWidth,
1793    ) -> Result<(), String> {
1794        bpf_log!(self, verification_context, "{op_name}{} {}", width.bits(), display_register(dst),);
1795        let bit_op = |value: u64| match width {
1796            DataWidth::U16 => BO::read_u16((value as u16).as_bytes()) as u64,
1797            DataWidth::U32 => BO::read_u32((value as u32).as_bytes()) as u64,
1798            DataWidth::U64 => BO::read_u64(value.as_bytes()),
1799            _ => {
1800                panic!("Unexpected bit width for endianness operation");
1801            }
1802        };
1803        let value = self.reg(dst)?;
1804        let new_value = match value {
1805            Type::ScalarValue(data) => Type::ScalarValue(ScalarValueData::new(
1806                bit_op(data.value),
1807                bit_op(data.unknown_mask),
1808                bit_op(data.unwritten_mask),
1809                U64Range::max(),
1810            )),
1811            _ => Type::default(),
1812        };
1813        let mut next = self.next()?;
1814        next.set_reg(dst, new_value)?;
1815        verification_context.states.push(next);
1816        Ok(())
1817    }
1818
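    /// Statically evaluates the branch condition `op` on `op1` and `op2`. Returns
    /// `Ok(Some(true))` if the branch is always taken, `Ok(Some(false))` if it is never taken,
    /// and `Ok(None)` when both outcomes remain possible. Comparisons between incompatible types
    /// are rejected.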
1819    fn compute_branch(
1820        &self,
1821        jump_width: JumpWidth,
1822        op1: &Type,
1823        op2: &Type,
1824        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
1825    ) -> Result<Option<bool>, String> {
1826        match (jump_width, op1, op2) {
1827            (_, Type::ScalarValue(data1), Type::ScalarValue(data2)) => op(*data1, *data2),
1828            (JumpWidth::W64, Type::ScalarValue(data), Type::NullOr { .. })
1829            | (JumpWidth::W64, Type::NullOr { .. }, Type::ScalarValue(data))
1830                if data.is_zero() =>
1831            {
1832                Ok(None)
1833            }
1834
1835            (JumpWidth::W64, Type::ScalarValue(data), t) if data.is_zero() && t.is_non_zero() => {
1836                op(1.into(), 0.into())
1837            }
1838
1839            (JumpWidth::W64, t, Type::ScalarValue(data)) if data.is_zero() && t.is_non_zero() => {
1840                op(0.into(), 1.into())
1841            }
1842
1843            (JumpWidth::W64, Type::PtrToStack { offset: x }, Type::PtrToStack { offset: y }) => {
1844                op(x.reg(), y.reg())
1845            }
1846
1847            (
1848                JumpWidth::W64,
1849                Type::PtrToMemory { id: id1, offset: x, .. },
1850                Type::PtrToMemory { id: id2, offset: y, .. },
1851            )
1852            | (
1853                JumpWidth::W64,
1854                Type::PtrToStruct { id: id1, offset: x, .. },
1855                Type::PtrToStruct { id: id2, offset: y, .. },
1856            )
1857            | (
1858                JumpWidth::W64,
1859                Type::PtrToArray { id: id1, offset: x, .. },
1860                Type::PtrToArray { id: id2, offset: y, .. },
1861            ) if *id1 == *id2 => op(*x, *y),
1862
1863            (JumpWidth::W64, Type::PtrToArray { id: id1, .. }, Type::PtrToEndArray { id: id2 })
1864            | (JumpWidth::W64, Type::PtrToEndArray { id: id1 }, Type::PtrToArray { id: id2, .. })
1865                if *id1 == *id2 =>
1866            {
1867                Ok(None)
1868            }
1869
1870            _ => Err(()),
1871        }
1872        .map_err(|_| "non permitted comparison".to_string())
1873    }
1874
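    /// Verifies a conditional jump by forking the state: one successor takes the jump and is
    /// constrained by `jump_type`, the other falls through and is constrained by its inverse.
    /// Successors proven unreachable by `compute_branch` are not pushed.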
1875    fn conditional_jump(
1876        &mut self,
1877        op_name: &str,
1878        verification_context: &mut VerificationContext<'_>,
1879        dst: Register,
1880        src: Source,
1881        offset: i16,
1882        jump_type: JumpType,
1883        jump_width: JumpWidth,
1884        op: impl Fn(ScalarValueData, ScalarValueData) -> Result<Option<bool>, ()>,
1885    ) -> Result<(), String> {
1886        bpf_log!(
1887            self,
1888            verification_context,
1889            "{op_name} {}, {}, {}",
1890            display_register(dst),
1891            display_source(src),
1892            if offset == 0 { "0".to_string() } else { print_offset(offset) },
1893        );
1894        let op1 = self.reg(dst)?;
1895        let op2 = self.compute_source(src.clone())?;
1896        let apply_constraints_and_register = |mut next: Self,
1897                                              jump_type: JumpType|
1898         -> Result<Self, String> {
1899            if jump_type != JumpType::Unknown {
1900                let (new_op1, new_op2) =
1901                    Type::constraint(&mut next, jump_type, jump_width, op1.clone(), op2.clone())?;
1902                if dst < GENERAL_REGISTER_COUNT {
1903                    next.set_reg(dst, new_op1)?;
1904                }
1905                match src {
1906                    Source::Reg(r) => {
1907                        next.set_reg(r, new_op2)?;
1908                    }
1909                    _ => {
1910                        // Nothing to do
1911                    }
1912                }
1913            }
1914            Ok(next)
1915        };
1916        let branch = self.compute_branch(jump_width, &op1, &op2, op)?;
1917        let parent = Some(Arc::new(self.clone()));
1918        if branch.unwrap_or(true) {
1919            // Do the jump
1920            verification_context.states.push(apply_constraints_and_register(
1921                self.jump_with_offset(offset, parent.clone())?,
1922                jump_type,
1923            )?);
1924        }
1925        if !branch.unwrap_or(false) {
1926            // Skip the jump
1927            verification_context.states.push(apply_constraints_and_register(
1928                self.jump_with_offset(0, parent)?,
1929                jump_type.invert(),
1930            )?);
1931        }
1932        Ok(())
1933    }
1934
1935    /// Handles the termination of a `ComputationContext`, performing branch cutting optimization.
1936    ///
1937    /// This method is called when it has been proven that the current context terminates (e.g.,
1938    /// reaches an `exit` instruction).
1939    ///
1940    /// The following steps are performed:
1941    /// 1. **Dependency Calculation:** The data dependencies of the context are computed based on
1942    ///    the dependencies of its terminated children and the instruction at the current PC.
1943    /// 2. **Context Broadening:** The context's state is broadened by clearing registers and stack
1944    ///    slots that are *not* in the calculated data dependencies. This optimization assumes that
1945    ///    data not used by the terminated branch is irrelevant for future execution paths.
1946    /// 3. **Termination Registration:** The broadened context is added to the set of terminating
1947    ///    contexts if it is not less than any existing terminating context at the same PC.
1948    /// 4. **Parent Termination:** If all the children of the current context have terminated,
1949    ///    its parent context is recursively terminated.
1950    fn terminate(self, verification_context: &mut VerificationContext<'_>) -> Result<(), String> {
1951        let mut next = Some(self);
1952        // Because of the potential length of the parent chain, do not use recursion.
1953        while let Some(mut current) = next.take() {
1954            // Take the parent to process it at the end and not keep it in the terminating
1955            // contexts.
1956            let parent = current.parent.take();
1957
1958            // 1. Compute the dependencies of the context using the dependencies of its children
1959            //    and the actual operation.
1960            let mut dependencies = DataDependencies::default();
1961            for dependency in current.dependencies.get_mut().iter() {
1962                dependencies.merge(dependency);
1963            }
1964
1965            dependencies.visit(
1966                &mut DataDependenciesVisitorContext {
1967                    calling_context: &verification_context.calling_context,
1968                    computation_context: &current,
1969                },
1970                verification_context.code[current.pc],
1971            )?;
1972
1973            // 2. Clear the state depending on the dependencies states
1974            for register in 0..GENERAL_REGISTER_COUNT {
1975                if !dependencies.registers.contains(&register) {
1976                    current.set_reg(register, Default::default())?;
1977                }
1978            }
1979            current.stack.data.retain(|k, _| dependencies.stack.contains(k));
1980
1981            // 3. Add the cleared state to the set of `terminating_contexts`
1982            let terminating_contexts =
1983                verification_context.terminating_contexts.entry(current.pc).or_default();
1984            let mut is_dominated = false;
1985            terminating_contexts.retain(|c| match c.computation_context.partial_cmp(&current) {
1986                Some(Ordering::Less) => false,
1987                Some(Ordering::Equal) | Some(Ordering::Greater) => {
1988                    // If the list contains a context greater or equal to the current one, it
1989                    // should not be added.
1990                    is_dominated = true;
1991                    true
1992                }
1993                _ => true,
1994            });
1995            if !is_dominated {
1996                terminating_contexts.push(TerminatingContext {
1997                    computation_context: current,
1998                    dependencies: dependencies.clone(),
1999                });
2000            }
2001
2002            // 4. Register the computed dependencies in our parent, and terminate it if all
2003            //    dependencies have been computed.
2004            if let Some(parent) = parent {
2005                parent.dependencies.lock().push(dependencies);
2006                // To check whether all dependencies have been computed, rely on the fact that
2007                // the Arc count of the parent keeps track of how many dependencies are left.
2008                next = Arc::into_inner(parent);
2009            }
2010        }
2011        Ok(())
2012    }
2013}
2014
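/// Drops the parent chain iteratively: a long chain would otherwise be freed by recursive `Drop`
/// calls on `Arc` and could overflow the stack.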
2015impl Drop for ComputationContext {
2016    fn drop(&mut self) {
2017        let mut next = self.parent.take().and_then(Arc::into_inner);
2018        // Because of the potential length of the parent chain, do not use recursion.
2019        while let Some(mut current) = next {
2020            next = current.parent.take().and_then(Arc::into_inner);
2021        }
2022    }
2023}
2024
2025/// Two contexts are ordered with `t1` > `t2` if a proof that a program in state `t1` finishes
2026/// is also a proof that a program in state `t2` finishes.
2027impl PartialOrd for ComputationContext {
2028    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2029        if self.pc != other.pc || self.resources != other.resources {
2030            return None;
2031        }
2032        let mut result = self.stack.partial_cmp(&other.stack)?;
2033        result = associate_orderings(
2034            result,
2035            Type::compare_list(self.registers.iter(), other.registers.iter())?,
2036        )?;
2037        let mut array_bound_iter1 = self.array_bounds.iter().peekable();
2038        let mut array_bound_iter2 = other.array_bounds.iter().peekable();
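        // Array bounds compare inverted (`v2.cmp(v1)`): proving a *smaller* bound is the stronger
        // statement, and a missing entry behaves like a bound of 0 (cf. `unwrap_or(&0)` in
        // `load_memory`), so the side that runs out of entries first compares as greater.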
2039        let result = loop {
2040            match (array_bound_iter1.peek().cloned(), array_bound_iter2.peek().cloned()) {
2041                (None, None) => break result,
2042                (None, _) => break associate_orderings(result, Ordering::Greater)?,
2043                (_, None) => break associate_orderings(result, Ordering::Less)?,
2044                (Some((k1, v1)), Some((k2, v2))) => match k1.cmp(k2) {
2045                    Ordering::Equal => {
2046                        array_bound_iter1.next();
2047                        array_bound_iter2.next();
2048                        result = associate_orderings(result, v2.cmp(v1))?;
2049                    }
2050                    v @ Ordering::Less => {
2051                        array_bound_iter1.next();
2052                        result = associate_orderings(result, v)?;
2053                    }
2054                    v @ Ordering::Greater => {
2055                        array_bound_iter2.next();
2056                        result = associate_orderings(result, v)?;
2057                    }
2058                },
2059            }
2060        };
2061        Some(result)
2062    }
2063}
2064
2065/// Represents the read data dependencies of an eBPF program branch.
2066///
2067/// This struct tracks which registers and stack positions are *read* by the
2068/// instructions within a branch of the eBPF program.  This information is used
2069/// during branch cutting optimization to broaden terminating contexts.
2070///
2071/// The verifier assumes that data not read by a terminated branch is irrelevant
2072/// for future execution paths and can be safely cleared.
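///
/// For example, in a branch that ends with `r0 = r1; exit`, `exit` reads r0 and the `mov`
/// transfers that dependency to r1, so the branch as a whole depends only on `{r1}`.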
2073#[derive(Clone, Debug, Default)]
2074struct DataDependencies {
2075    /// The set of registers read by the children of a context.
2076    registers: HashSet<Register>,
2077    /// The stack positions read by the children of a context.
2078    stack: HashSet<usize>,
2079}
2080
2081impl DataDependencies {
2082    fn merge(&mut self, other: &DataDependencies) {
2083        self.registers.extend(other.registers.iter());
2084        self.stack.extend(other.stack.iter());
2085    }
2086
2087    fn alu(&mut self, dst: Register, src: Source) -> Result<(), String> {
2088        // Only do something if the dst is read, otherwise the computation doesn't matter.
2089        if self.registers.contains(&dst) {
2090            if let Source::Reg(src) = src {
2091                self.registers.insert(src);
2092            }
2093        }
2094        Ok(())
2095    }
2096
2097    fn jmp(&mut self, dst: Register, src: Source) -> Result<(), String> {
2098        self.registers.insert(dst);
2099        if let Source::Reg(src) = src {
2100            self.registers.insert(src);
2101        }
2102        Ok(())
2103    }
2104
2105    fn atomic(
2106        &mut self,
2107        context: &ComputationContext,
2108        fetch: bool,
2109        dst: Register,
2110        offset: i16,
2111        src: Register,
2112        width: DataWidth,
2113        is_cmpxchg: bool,
2114    ) -> Result<(), String> {
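        // The operation is considered a read when the fetched old value lands in a register that
        // is itself read later (`src`, or r0 for cmpxchg), or when the target is a stack slot
        // that is in the read set.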
2115        let mut is_read = false;
2116        if is_cmpxchg && self.registers.contains(&0) {
2117            is_read = true;
2118        }
2119        if fetch && self.registers.contains(&src) {
2120            is_read = true;
2121        }
2122        let addr = context.reg(dst)?;
2123        if let Type::PtrToStack { offset: stack_offset } = addr {
2124            let stack_offset = stack_offset.add(offset);
2125            if !stack_offset.is_valid() {
2126                return Err(format!("Invalid stack offset at {}", context.pc));
2127            }
2128            if is_read || self.stack.contains(&stack_offset.array_index()) {
2129                is_read = true;
2130                self.stack.insert(stack_offset.array_index());
2131            }
2132        }
2133        if is_read {
2134            self.registers.insert(0);
2135            self.registers.insert(src);
2136        }
2137        self.registers.insert(dst);
2138        Ok(())
2139    }
2140}
2141
2142struct DataDependenciesVisitorContext<'a> {
2143    calling_context: &'a CallingContext,
2144    computation_context: &'a ComputationContext,
2145}
2146
2147impl BpfVisitor for DataDependencies {
2148    type Context<'a> = DataDependenciesVisitorContext<'a>;
2149
2150    fn add<'a>(
2151        &mut self,
2152        _context: &mut Self::Context<'a>,
2153        dst: Register,
2154        src: Source,
2155    ) -> Result<(), String> {
2156        self.alu(dst, src)
2157    }
2158    fn add64<'a>(
2159        &mut self,
2160        _context: &mut Self::Context<'a>,
2161        dst: Register,
2162        src: Source,
2163    ) -> Result<(), String> {
2164        self.alu(dst, src)
2165    }
2166    fn and<'a>(
2167        &mut self,
2168        _context: &mut Self::Context<'a>,
2169        dst: Register,
2170        src: Source,
2171    ) -> Result<(), String> {
2172        self.alu(dst, src)
2173    }
2174    fn and64<'a>(
2175        &mut self,
2176        _context: &mut Self::Context<'a>,
2177        dst: Register,
2178        src: Source,
2179    ) -> Result<(), String> {
2180        self.alu(dst, src)
2181    }
2182    fn arsh<'a>(
2183        &mut self,
2184        _context: &mut Self::Context<'a>,
2185        dst: Register,
2186        src: Source,
2187    ) -> Result<(), String> {
2188        self.alu(dst, src)
2189    }
2190    fn arsh64<'a>(
2191        &mut self,
2192        _context: &mut Self::Context<'a>,
2193        dst: Register,
2194        src: Source,
2195    ) -> Result<(), String> {
2196        self.alu(dst, src)
2197    }
2198    fn div<'a>(
2199        &mut self,
2200        _context: &mut Self::Context<'a>,
2201        dst: Register,
2202        src: Source,
2203    ) -> Result<(), String> {
2204        self.alu(dst, src)
2205    }
2206    fn div64<'a>(
2207        &mut self,
2208        _context: &mut Self::Context<'a>,
2209        dst: Register,
2210        src: Source,
2211    ) -> Result<(), String> {
2212        self.alu(dst, src)
2213    }
2214    fn lsh<'a>(
2215        &mut self,
2216        _context: &mut Self::Context<'a>,
2217        dst: Register,
2218        src: Source,
2219    ) -> Result<(), String> {
2220        self.alu(dst, src)
2221    }
2222    fn lsh64<'a>(
2223        &mut self,
2224        _context: &mut Self::Context<'a>,
2225        dst: Register,
2226        src: Source,
2227    ) -> Result<(), String> {
2228        self.alu(dst, src)
2229    }
2230    fn r#mod<'a>(
2231        &mut self,
2232        _context: &mut Self::Context<'a>,
2233        dst: Register,
2234        src: Source,
2235    ) -> Result<(), String> {
2236        self.alu(dst, src)
2237    }
2238    fn mod64<'a>(
2239        &mut self,
2240        _context: &mut Self::Context<'a>,
2241        dst: Register,
2242        src: Source,
2243    ) -> Result<(), String> {
2244        self.alu(dst, src)
2245    }
2246    fn mul<'a>(
2247        &mut self,
2248        _context: &mut Self::Context<'a>,
2249        dst: Register,
2250        src: Source,
2251    ) -> Result<(), String> {
2252        self.alu(dst, src)
2253    }
2254    fn mul64<'a>(
2255        &mut self,
2256        _context: &mut Self::Context<'a>,
2257        dst: Register,
2258        src: Source,
2259    ) -> Result<(), String> {
2260        self.alu(dst, src)
2261    }
2262    fn or<'a>(
2263        &mut self,
2264        _context: &mut Self::Context<'a>,
2265        dst: Register,
2266        src: Source,
2267    ) -> Result<(), String> {
2268        self.alu(dst, src)
2269    }
2270    fn or64<'a>(
2271        &mut self,
2272        _context: &mut Self::Context<'a>,
2273        dst: Register,
2274        src: Source,
2275    ) -> Result<(), String> {
2276        self.alu(dst, src)
2277    }
2278    fn rsh<'a>(
2279        &mut self,
2280        _context: &mut Self::Context<'a>,
2281        dst: Register,
2282        src: Source,
2283    ) -> Result<(), String> {
2284        self.alu(dst, src)
2285    }
2286    fn rsh64<'a>(
2287        &mut self,
2288        _context: &mut Self::Context<'a>,
2289        dst: Register,
2290        src: Source,
2291    ) -> Result<(), String> {
2292        self.alu(dst, src)
2293    }
2294    fn sub<'a>(
2295        &mut self,
2296        _context: &mut Self::Context<'a>,
2297        dst: Register,
2298        src: Source,
2299    ) -> Result<(), String> {
2300        self.alu(dst, src)
2301    }
2302    fn sub64<'a>(
2303        &mut self,
2304        _context: &mut Self::Context<'a>,
2305        dst: Register,
2306        src: Source,
2307    ) -> Result<(), String> {
2308        self.alu(dst, src)
2309    }
2310    fn xor<'a>(
2311        &mut self,
2312        _context: &mut Self::Context<'a>,
2313        dst: Register,
2314        src: Source,
2315    ) -> Result<(), String> {
2316        self.alu(dst, src)
2317    }
2318    fn xor64<'a>(
2319        &mut self,
2320        _context: &mut Self::Context<'a>,
2321        dst: Register,
2322        src: Source,
2323    ) -> Result<(), String> {
2324        self.alu(dst, src)
2325    }
2326
2327    fn mov<'a>(
2328        &mut self,
2329        _context: &mut Self::Context<'a>,
2330        dst: Register,
2331        src: Source,
2332    ) -> Result<(), String> {
2333        if src == Source::Reg(dst) || !self.registers.contains(&dst) {
2334            return Ok(());
2335        }
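        // `dst` is overwritten, so the dependency moves from `dst` to `src`.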
2336        if let Source::Reg(src) = src {
2337            self.registers.insert(src);
2338        }
2339        self.registers.remove(&dst);
2340        Ok(())
2341    }
2342    fn mov64<'a>(
2343        &mut self,
2344        context: &mut Self::Context<'a>,
2345        dst: Register,
2346        src: Source,
2347    ) -> Result<(), String> {
2348        self.mov(context, dst, src)
2349    }
2350
2351    fn neg<'a>(&mut self, _context: &mut Self::Context<'a>, _dst: Register) -> Result<(), String> {
2352        // This is reading and writing the same value. This induces no change in dependencies.
2353        Ok(())
2354    }
2355    fn neg64<'a>(
2356        &mut self,
2357        _context: &mut Self::Context<'a>,
2358        _dst: Register,
2359    ) -> Result<(), String> {
2360        // This is reading and writing the same value. This induces no change in dependencies.
2361        Ok(())
2362    }
2363
2364    fn be<'a>(
2365        &mut self,
2366        _context: &mut Self::Context<'a>,
2367        _dst: Register,
2368        _width: DataWidth,
2369    ) -> Result<(), String> {
2370        // This is reading and writing the same value. This induces no change in dependencies.
2371        Ok(())
2372    }
2373    fn le<'a>(
2374        &mut self,
2375        _context: &mut Self::Context<'a>,
2376        _dst: Register,
2377        _width: DataWidth,
2378    ) -> Result<(), String> {
2379        // This is reading and writing the same value. This induces no change in dependencies.
2380        Ok(())
2381    }
2382
2383    fn call_external<'a>(
2384        &mut self,
2385        context: &mut Self::Context<'a>,
2386        index: u32,
2387    ) -> Result<(), String> {
2388        let Some(signature) = context.calling_context.helpers.get(&index).cloned() else {
2389            return Err(format!("unknown external function {}", index));
2390        };
2391        // r0 is overwritten by the return value, and r1 to r5 are scratch registers.
2392        for register in 0..signature.args.len() + 1 {
2393            self.registers.remove(&(register as Register));
2394        }
2395        // 1 to k are parameters.
2396        for register in 0..signature.args.len() {
2397            self.registers.insert((register + 1) as Register);
2398        }
2399        Ok(())
2400    }
2401
2402    fn exit<'a>(&mut self, _context: &mut Self::Context<'a>) -> Result<(), String> {
2403        // This reads r0 unconditionally.
2404        self.registers.insert(0);
2405        Ok(())
2406    }
2407
2408    fn jump<'a>(&mut self, _context: &mut Self::Context<'a>, _offset: i16) -> Result<(), String> {
2409        // This doesn't do anything with values.
2410        Ok(())
2411    }
2412
2413    fn jeq<'a>(
2414        &mut self,
2415        _context: &mut Self::Context<'a>,
2416        dst: Register,
2417        src: Source,
2418        offset: i16,
2419    ) -> Result<(), String> {
2420        self.jmp(dst, src)
2421    }
2422    fn jeq64<'a>(
2423        &mut self,
2424        _context: &mut Self::Context<'a>,
2425        dst: Register,
2426        src: Source,
2427        offset: i16,
2428    ) -> Result<(), String> {
2429        self.jmp(dst, src)
2430    }
2431    fn jne<'a>(
2432        &mut self,
2433        _context: &mut Self::Context<'a>,
2434        dst: Register,
2435        src: Source,
2436        offset: i16,
2437    ) -> Result<(), String> {
2438        self.jmp(dst, src)
2439    }
2440    fn jne64<'a>(
2441        &mut self,
2442        _context: &mut Self::Context<'a>,
2443        dst: Register,
2444        src: Source,
2445        offset: i16,
2446    ) -> Result<(), String> {
2447        self.jmp(dst, src)
2448    }
2449    fn jge<'a>(
2450        &mut self,
2451        _context: &mut Self::Context<'a>,
2452        dst: Register,
2453        src: Source,
2454        offset: i16,
2455    ) -> Result<(), String> {
2456        self.jmp(dst, src)
2457    }
2458    fn jge64<'a>(
2459        &mut self,
2460        _context: &mut Self::Context<'a>,
2461        dst: Register,
2462        src: Source,
2463        offset: i16,
2464    ) -> Result<(), String> {
2465        self.jmp(dst, src)
2466    }
2467    fn jgt<'a>(
2468        &mut self,
2469        _context: &mut Self::Context<'a>,
2470        dst: Register,
2471        src: Source,
2472        offset: i16,
2473    ) -> Result<(), String> {
2474        self.jmp(dst, src)
2475    }
2476    fn jgt64<'a>(
2477        &mut self,
2478        _context: &mut Self::Context<'a>,
2479        dst: Register,
2480        src: Source,
2481        offset: i16,
2482    ) -> Result<(), String> {
2483        self.jmp(dst, src)
2484    }
2485    fn jle<'a>(
2486        &mut self,
2487        _context: &mut Self::Context<'a>,
2488        dst: Register,
2489        src: Source,
2490        offset: i16,
2491    ) -> Result<(), String> {
2492        self.jmp(dst, src)
2493    }
2494    fn jle64<'a>(
2495        &mut self,
2496        _context: &mut Self::Context<'a>,
2497        dst: Register,
2498        src: Source,
2499        offset: i16,
2500    ) -> Result<(), String> {
2501        self.jmp(dst, src)
2502    }
2503    fn jlt<'a>(
2504        &mut self,
2505        _context: &mut Self::Context<'a>,
2506        dst: Register,
2507        src: Source,
2508        offset: i16,
2509    ) -> Result<(), String> {
2510        self.jmp(dst, src)
2511    }
2512    fn jlt64<'a>(
2513        &mut self,
2514        _context: &mut Self::Context<'a>,
2515        dst: Register,
2516        src: Source,
2517        offset: i16,
2518    ) -> Result<(), String> {
2519        self.jmp(dst, src)
2520    }
2521    fn jsge<'a>(
2522        &mut self,
2523        _context: &mut Self::Context<'a>,
2524        dst: Register,
2525        src: Source,
2526        offset: i16,
2527    ) -> Result<(), String> {
2528        self.jmp(dst, src)
2529    }
2530    fn jsge64<'a>(
2531        &mut self,
2532        _context: &mut Self::Context<'a>,
2533        dst: Register,
2534        src: Source,
2535        offset: i16,
2536    ) -> Result<(), String> {
2537        self.jmp(dst, src)
2538    }
2539    fn jsgt<'a>(
2540        &mut self,
2541        _context: &mut Self::Context<'a>,
2542        dst: Register,
2543        src: Source,
2544        offset: i16,
2545    ) -> Result<(), String> {
2546        self.jmp(dst, src)
2547    }
2548    fn jsgt64<'a>(
2549        &mut self,
2550        _context: &mut Self::Context<'a>,
2551        dst: Register,
2552        src: Source,
2553        offset: i16,
2554    ) -> Result<(), String> {
2555        self.jmp(dst, src)
2556    }
2557    fn jsle<'a>(
2558        &mut self,
2559        _context: &mut Self::Context<'a>,
2560        dst: Register,
2561        src: Source,
2562        offset: i16,
2563    ) -> Result<(), String> {
2564        self.jmp(dst, src)
2565    }
2566    fn jsle64<'a>(
2567        &mut self,
2568        _context: &mut Self::Context<'a>,
2569        dst: Register,
2570        src: Source,
2571        offset: i16,
2572    ) -> Result<(), String> {
2573        self.jmp(dst, src)
2574    }
2575    fn jslt<'a>(
2576        &mut self,
2577        _context: &mut Self::Context<'a>,
2578        dst: Register,
2579        src: Source,
2580        offset: i16,
2581    ) -> Result<(), String> {
2582        self.jmp(dst, src)
2583    }
2584    fn jslt64<'a>(
2585        &mut self,
2586        _context: &mut Self::Context<'a>,
2587        dst: Register,
2588        src: Source,
2589        offset: i16,
2590    ) -> Result<(), String> {
2591        self.jmp(dst, src)
2592    }
2593    fn jset<'a>(
2594        &mut self,
2595        _context: &mut Self::Context<'a>,
2596        dst: Register,
2597        src: Source,
2598        offset: i16,
2599    ) -> Result<(), String> {
2600        self.jmp(dst, src)
2601    }
2602    fn jset64<'a>(
2603        &mut self,
2604        _context: &mut Self::Context<'a>,
2605        dst: Register,
2606        src: Source,
2607        offset: i16,
2608    ) -> Result<(), String> {
2609        self.jmp(dst, src)
2610    }
2611
2612    fn atomic_add<'a>(
2613        &mut self,
2614        context: &mut Self::Context<'a>,
2615        fetch: bool,
2616        dst: Register,
2617        offset: i16,
2618        src: Register,
2619    ) -> Result<(), String> {
2620        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
2621    }
2622
2623    fn atomic_add64<'a>(
2624        &mut self,
2625        context: &mut Self::Context<'a>,
2626        fetch: bool,
2627        dst: Register,
2628        offset: i16,
2629        src: Register,
2630    ) -> Result<(), String> {
2631        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
2632    }
2633
2634    fn atomic_and<'a>(
2635        &mut self,
2636        context: &mut Self::Context<'a>,
2637        fetch: bool,
2638        dst: Register,
2639        offset: i16,
2640        src: Register,
2641    ) -> Result<(), String> {
2642        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
2643    }
2644
2645    fn atomic_and64<'a>(
2646        &mut self,
2647        context: &mut Self::Context<'a>,
2648        fetch: bool,
2649        dst: Register,
2650        offset: i16,
2651        src: Register,
2652    ) -> Result<(), String> {
2653        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
2654    }
2655
2656    fn atomic_or<'a>(
2657        &mut self,
2658        context: &mut Self::Context<'a>,
2659        fetch: bool,
2660        dst: Register,
2661        offset: i16,
2662        src: Register,
2663    ) -> Result<(), String> {
2664        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
2665    }
2666
2667    fn atomic_or64<'a>(
2668        &mut self,
2669        context: &mut Self::Context<'a>,
2670        fetch: bool,
2671        dst: Register,
2672        offset: i16,
2673        src: Register,
2674    ) -> Result<(), String> {
2675        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
2676    }
2677
2678    fn atomic_xor<'a>(
2679        &mut self,
2680        context: &mut Self::Context<'a>,
2681        fetch: bool,
2682        dst: Register,
2683        offset: i16,
2684        src: Register,
2685    ) -> Result<(), String> {
2686        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
2687    }
2688
2689    fn atomic_xor64<'a>(
2690        &mut self,
2691        context: &mut Self::Context<'a>,
2692        fetch: bool,
2693        dst: Register,
2694        offset: i16,
2695        src: Register,
2696    ) -> Result<(), String> {
2697        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
2698    }
2699
2700    fn atomic_xchg<'a>(
2701        &mut self,
2702        context: &mut Self::Context<'a>,
2703        fetch: bool,
2704        dst: Register,
2705        offset: i16,
2706        src: Register,
2707    ) -> Result<(), String> {
2708        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U32, false)
2709    }
2710
2711    fn atomic_xchg64<'a>(
2712        &mut self,
2713        context: &mut Self::Context<'a>,
2714        fetch: bool,
2715        dst: Register,
2716        offset: i16,
2717        src: Register,
2718    ) -> Result<(), String> {
2719        self.atomic(&context.computation_context, fetch, dst, offset, src, DataWidth::U64, false)
2720    }
2721
2722    fn atomic_cmpxchg<'a>(
2723        &mut self,
2724        context: &mut Self::Context<'a>,
2725        dst: Register,
2726        offset: i16,
2727        src: Register,
2728    ) -> Result<(), String> {
2729        self.atomic(&context.computation_context, true, dst, offset, src, DataWidth::U32, true)
2730    }
2731
2732    fn atomic_cmpxchg64<'a>(
2733        &mut self,
2734        context: &mut Self::Context<'a>,
2735        dst: Register,
2736        offset: i16,
2737        src: Register,
2738    ) -> Result<(), String> {
2739        self.atomic(&context.computation_context, true, dst, offset, src, DataWidth::U64, true)
2740    }
2741
2742    fn load<'a>(
2743        &mut self,
2744        context: &mut Self::Context<'a>,
2745        dst: Register,
2746        offset: i16,
2747        src: Register,
2748        width: DataWidth,
2749    ) -> Result<(), String> {
2750        let context = &context.computation_context;
2751        if self.registers.contains(&dst) {
2752            let addr = context.reg(src)?;
2753            if let Type::PtrToStack { offset: stack_offset } = addr {
2754                let stack_offset = stack_offset.add(offset);
2755                if !stack_offset.is_valid() {
2756                    return Err(format!("Invalid stack offset at {}", context.pc));
2757                }
2758                self.stack.insert(stack_offset.array_index());
2759            }
2760        }
2761        self.registers.insert(src);
2762        Ok(())
2763    }
2764
2765    fn load64<'a>(
2766        &mut self,
2767        _context: &mut Self::Context<'a>,
2768        dst: Register,
2769        _src: u8,
2770        _lower: u32,
2771    ) -> Result<(), String> {
2772        self.registers.remove(&dst);
2773        Ok(())
2774    }
2775
2776    fn load_from_packet<'a>(
2777        &mut self,
2778        _context: &mut Self::Context<'a>,
2779        dst: Register,
2780        src: Register,
2781        _offset: i32,
2782        register_offset: Option<Register>,
2783        _width: DataWidth,
2784    ) -> Result<(), String> {
2785        // r1 to r5 are scratch registers.
2786        for register in 1..6 {
2787            self.registers.remove(&(register as Register));
2788        }
2789        // Only do something if the dst is read, otherwise the computation doesn't matter.
2790        if self.registers.remove(&dst) {
2791            self.registers.insert(src);
2792            if let Some(reg) = register_offset {
2793                self.registers.insert(reg);
2794            }
2795        }
2796        Ok(())
2797    }
2798
2799    fn store<'a>(
2800        &mut self,
2801        context: &mut Self::Context<'a>,
2802        dst: Register,
2803        offset: i16,
2804        src: Source,
2805        width: DataWidth,
2806    ) -> Result<(), String> {
2807        let context = &context.computation_context;
2808        let addr = context.reg(dst)?;
2809        if let Type::PtrToStack { offset: stack_offset } = addr {
2810            let stack_offset = stack_offset.add(offset);
2811            if !stack_offset.is_valid() {
2812                return Err(format!("Invalid stack offset at {}", context.pc));
2813            }
2814            if self.stack.remove(&stack_offset.array_index()) {
2815                if let Source::Reg(src) = src {
2816                    self.registers.insert(src);
2817                }
2818            }
2819        } else if let Source::Reg(src) = src {
2820            self.registers.insert(src);
2821        }
2824
2825        Ok(())
2826    }
2827}
2828
2829#[derive(Debug)]
2830struct TerminatingContext {
2831    computation_context: ComputationContext,
2832    dependencies: DataDependencies,
2833}
2834
2835#[derive(Clone, Copy, Debug, Eq, PartialEq)]
2836enum AluType {
2837    Plain,
2838    Sub,
2839    Add,
2840}
2841
2842impl AluType {
2843    /// Whether this operation can be applied to a pointer and a scalar.
2844    fn is_ptr_compatible(&self) -> bool {
2845        match self {
2846            Self::Sub | Self::Add => true,
2847            _ => false,
2848        }
2849    }
2850}
2851
2852#[derive(Clone, Copy, Debug, Eq, PartialEq)]
2853enum JumpWidth {
2854    W32,
2855    W64,
2856}
2857
2858#[derive(Clone, Copy, Debug, Eq, PartialEq)]
2859enum JumpType {
2860    Eq,
2861    Ge,
2862    Gt,
2863    Le,
2864    LooseComparaison,
2865    Lt,
2866    Ne,
2867    StrictComparaison,
2868    Unknown,
2869}
2870
2871impl JumpType {
2872    fn invert(&self) -> Self {
2873        match self {
2874            Self::Eq => Self::Ne,
2875            Self::Ge => Self::Lt,
2876            Self::Gt => Self::Le,
2877            Self::Le => Self::Gt,
2878            Self::LooseComparaison => Self::StrictComparaison,
2879            Self::Lt => Self::Ge,
2880            Self::Ne => Self::Eq,
2881            Self::StrictComparaison => Self::LooseComparaison,
2882            Self::Unknown => Self::Unknown,
2883        }
2884    }
2885
2886    fn is_strict(&self) -> bool {
2887        match self {
2888            Self::Gt | Self::Lt | Self::Ne | Self::StrictComparaison => true,
2889            _ => false,
2890        }
2891    }
2892}
2893
2894fn display_register(register: Register) -> String {
2895    format!("%r{register}")
2896}
2897
2898fn display_source(src: Source) -> String {
2899    match src {
2900        Source::Reg(r) => display_register(r),
2901        Source::Value(v) => format!("0x{v:x}"),
2902    }
2903}
2904
2905impl BpfVisitor for ComputationContext {
2906    type Context<'a> = VerificationContext<'a>;
2907
2908    fn add<'a>(
2909        &mut self,
2910        context: &mut Self::Context<'a>,
2911        dst: Register,
2912        src: Source,
2913    ) -> Result<(), String> {
2914        self.alu(Some("add32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x + y))
2915    }
2916    fn add64<'a>(
2917        &mut self,
2918        context: &mut Self::Context<'a>,
2919        dst: Register,
2920        src: Source,
2921    ) -> Result<(), String> {
2922        self.alu(Some("add"), context, dst, src, AluType::Add, |x, y| x + y)
2923    }
2924    fn and<'a>(
2925        &mut self,
2926        context: &mut Self::Context<'a>,
2927        dst: Register,
2928        src: Source,
2929    ) -> Result<(), String> {
2930        self.alu(Some("and32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x & y))
2931    }
2932    fn and64<'a>(
2933        &mut self,
2934        context: &mut Self::Context<'a>,
2935        dst: Register,
2936        src: Source,
2937    ) -> Result<(), String> {
2938        self.alu(Some("and"), context, dst, src, AluType::Plain, |x, y| x & y)
2939    }
2940    fn arsh<'a>(
2941        &mut self,
2942        context: &mut Self::Context<'a>,
2943        dst: Register,
2944        src: Source,
2945    ) -> Result<(), String> {
2946        self.alu(Some("arsh32"), context, dst, src, AluType::Plain, |x, y| {
2947            alu32(x, y, |x, y| x.ashr(y))
2948        })
2949    }
2950    fn arsh64<'a>(
2951        &mut self,
2952        context: &mut Self::Context<'a>,
2953        dst: Register,
2954        src: Source,
2955    ) -> Result<(), String> {
2956        self.alu(Some("arsh"), context, dst, src, AluType::Plain, |x, y| x.ashr(y))
2957    }
2958    fn div<'a>(
2959        &mut self,
2960        context: &mut Self::Context<'a>,
2961        dst: Register,
2962        src: Source,
2963    ) -> Result<(), String> {
2964        self.alu(Some("div32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x / y))
2965    }
2966    fn div64<'a>(
2967        &mut self,
2968        context: &mut Self::Context<'a>,
2969        dst: Register,
2970        src: Source,
2971    ) -> Result<(), String> {
2972        self.alu(Some("div"), context, dst, src, AluType::Plain, |x, y| x / y)
2973    }
2974    fn lsh<'a>(
2975        &mut self,
2976        context: &mut Self::Context<'a>,
2977        dst: Register,
2978        src: Source,
2979    ) -> Result<(), String> {
2980        self.alu(Some("lsh32"), context, dst, src, AluType::Plain, |x, y| {
2981            alu32(x, y, |x, y| x << y)
2982        })
2983    }
2984    fn lsh64<'a>(
2985        &mut self,
2986        context: &mut Self::Context<'a>,
2987        dst: Register,
2988        src: Source,
2989    ) -> Result<(), String> {
2990        self.alu(Some("lsh"), context, dst, src, AluType::Plain, |x, y| x << y)
2991    }
2992    fn r#mod<'a>(
2993        &mut self,
2994        context: &mut Self::Context<'a>,
2995        dst: Register,
2996        src: Source,
2997    ) -> Result<(), String> {
2998        self.alu(Some("mod32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x % y))
2999    }
3000    fn mod64<'a>(
3001        &mut self,
3002        context: &mut Self::Context<'a>,
3003        dst: Register,
3004        src: Source,
3005    ) -> Result<(), String> {
3006        self.alu(Some("mod"), context, dst, src, AluType::Plain, |x, y| x % y)
3007    }
3008    fn mov<'a>(
3009        &mut self,
3010        context: &mut Self::Context<'a>,
3011        dst: Register,
3012        src: Source,
3013    ) -> Result<(), String> {
3014        bpf_log!(self, context, "mov32 {}, {}", display_register(dst), display_source(src));
3015        let src = self.compute_source(src)?;
3016        let value = match src {
3017            Type::ScalarValue(data) => {
3018                let value = (data.value as u32) as u64;
3019                let unknown_mask = (data.unknown_mask as u32) as u64;
3020                let unwritten_mask = (data.unwritten_mask as u32) as u64;
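                // The unsigned range is recomputed as if the low four bytes of the source
                // were written over an all-zero value, which models the zero-extension.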
3021                let urange = U64Range::compute_range_for_bytes_swap(0.into(), data.urange, 0, 0, 4);
3022                Type::ScalarValue(ScalarValueData::new(value, unknown_mask, unwritten_mask, urange))
3023            }
3024            _ => Type::default(),
3025        };
3026        let mut next = self.next()?;
3027        next.set_reg(dst, value)?;
3028        context.states.push(next);
3029        Ok(())
3030    }
3031    fn mov64<'a>(
3032        &mut self,
3033        context: &mut Self::Context<'a>,
3034        dst: Register,
3035        src: Source,
3036    ) -> Result<(), String> {
3037        bpf_log!(self, context, "mov {}, {}", display_register(dst), display_source(src));
3038        let src = self.compute_source(src)?;
3039        let mut next = self.next()?;
3040        next.set_reg(dst, src)?;
3041        context.states.push(next);
3042        Ok(())
3043    }
3044    fn mul<'a>(
3045        &mut self,
3046        context: &mut Self::Context<'a>,
3047        dst: Register,
3048        src: Source,
3049    ) -> Result<(), String> {
3050        self.alu(Some("mul32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x * y))
3051    }
3052    fn mul64<'a>(
3053        &mut self,
3054        context: &mut Self::Context<'a>,
3055        dst: Register,
3056        src: Source,
3057    ) -> Result<(), String> {
3058        self.alu(Some("mul"), context, dst, src, AluType::Plain, |x, y| x * y)
3059    }
3060    fn or<'a>(
3061        &mut self,
3062        context: &mut Self::Context<'a>,
3063        dst: Register,
3064        src: Source,
3065    ) -> Result<(), String> {
3066        self.alu(Some("or32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x | y))
3067    }
3068    fn or64<'a>(
3069        &mut self,
3070        context: &mut Self::Context<'a>,
3071        dst: Register,
3072        src: Source,
3073    ) -> Result<(), String> {
3074        self.alu(Some("or"), context, dst, src, AluType::Plain, |x, y| x | y)
3075    }
3076    fn rsh<'a>(
3077        &mut self,
3078        context: &mut Self::Context<'a>,
3079        dst: Register,
3080        src: Source,
3081    ) -> Result<(), String> {
3082        self.alu(Some("rsh32"), context, dst, src, AluType::Plain, |x, y| {
3083            alu32(x, y, |x, y| x >> y)
3084        })
3085    }
3086    fn rsh64<'a>(
3087        &mut self,
3088        context: &mut Self::Context<'a>,
3089        dst: Register,
3090        src: Source,
3091    ) -> Result<(), String> {
3092        self.alu(Some("rsh"), context, dst, src, AluType::Plain, |x, y| x >> y)
3093    }
3094    fn sub<'a>(
3095        &mut self,
3096        context: &mut Self::Context<'a>,
3097        dst: Register,
3098        src: Source,
3099    ) -> Result<(), String> {
3100        self.alu(Some("sub32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x - y))
3101    }
3102    fn sub64<'a>(
3103        &mut self,
3104        context: &mut Self::Context<'a>,
3105        dst: Register,
3106        src: Source,
3107    ) -> Result<(), String> {
3108        self.alu(Some("sub"), context, dst, src, AluType::Sub, |x, y| x - y)
3109    }
3110    fn xor<'a>(
3111        &mut self,
3112        context: &mut Self::Context<'a>,
3113        dst: Register,
3114        src: Source,
3115    ) -> Result<(), String> {
3116        self.alu(Some("xor32"), context, dst, src, AluType::Plain, |x, y| alu32(x, y, |x, y| x ^ y))
3117    }
3118    fn xor64<'a>(
3119        &mut self,
3120        context: &mut Self::Context<'a>,
3121        dst: Register,
3122        src: Source,
3123    ) -> Result<(), String> {
3124        self.alu(Some("xor"), context, dst, src, AluType::Plain, |x, y| x ^ y)
3125    }
3126
3127    fn neg<'a>(&mut self, context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
3128        bpf_log!(self, context, "neg32 {}", display_register(dst));
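        // The instruction was already logged above, so `None` is passed to `alu` to avoid
        // logging it twice.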
3129        self.alu(None, context, dst, Source::Value(0), AluType::Plain, |x, y| {
3130            alu32(x, y, |x, _y| -x)
3131        })
3132    }
3133    fn neg64<'a>(&mut self, context: &mut Self::Context<'a>, dst: Register) -> Result<(), String> {
3134        bpf_log!(self, context, "neg {}", display_register(dst));
3135        self.alu(None, context, dst, Source::Value(0), AluType::Plain, |x, _y| -x)
3136    }
3137
3138    fn be<'a>(
3139        &mut self,
3140        context: &mut Self::Context<'a>,
3141        dst: Register,
3142        width: DataWidth,
3143    ) -> Result<(), String> {
3144        self.endianness::<BigEndian>("be", context, dst, width)
3145    }
3146
3147    fn le<'a>(
3148        &mut self,
3149        context: &mut Self::Context<'a>,
3150        dst: Register,
3151        width: DataWidth,
3152    ) -> Result<(), String> {
3153        self.endianness::<LittleEndian>("le", context, dst, width)
3154    }
3155
3156    fn call_external<'a>(
3157        &mut self,
3158        context: &mut Self::Context<'a>,
3159        index: u32,
3160    ) -> Result<(), String> {
3161        bpf_log!(self, context, "call 0x{:x}", index);
3162        let Some(signature) = context.calling_context.helpers.get(&index).cloned() else {
3163            return Err(format!("unknown external function {}", index));
3164        };
3165        debug_assert!(signature.args.len() <= 5);
3166        let mut next = self.next()?;
3167        for (index, arg) in signature.args.iter().enumerate() {
3168            let reg = (index + 1) as u8;
3169            self.reg(reg)?.match_parameter_type(self, arg, index, &mut next)?;
3170        }
3171        // Parameters have been validated; now model the value returned by the helper.
3172        if signature.invalidate_array_bounds {
3173            next.array_bounds.clear();
3174        }
3175        let value =
3176            self.resolve_return_value(context, &signature.return_value, &mut next, false)?;
3177        next.set_reg(0, value)?;
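        // Helper calls clobber the caller-saved registers r1-r5, so reset them to unwritten.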
3178        for i in 1..=5 {
3179            next.set_reg(i, Type::default())?;
3180        }
3181        context.states.push(next);
3182        Ok(())
3183    }
3184
3185    fn exit<'a>(&mut self, context: &mut Self::Context<'a>) -> Result<(), String> {
3186        bpf_log!(self, context, "exit");
3187        if !self.reg(0)?.is_written_scalar() {
3188            return Err("register 0 is incorrect at exit time".to_string());
3189        }
3190        if !self.resources.is_empty() {
3191            return Err("some resources have not been released at exit time".to_string());
3192        }
3193        let this = self.clone();
3194        this.terminate(context)?;
3195        // Nothing else to do: the program terminated with a valid scalar value.
3196        Ok(())
3197    }
3198
3199    fn jump<'a>(&mut self, context: &mut Self::Context<'a>, offset: i16) -> Result<(), String> {
3200        bpf_log!(self, context, "ja {}", offset);
3201        let parent = Some(Arc::new(self.clone()));
3202        context.states.push(self.jump_with_offset(offset, parent)?);
3203        Ok(())
3204    }
3205
3206    fn jeq<'a>(
3207        &mut self,
3208        context: &mut Self::Context<'a>,
3209        dst: Register,
3210        src: Source,
3211        offset: i16,
3212    ) -> Result<(), String> {
3213        self.conditional_jump(
3214            "jeq32",
3215            context,
3216            dst,
3217            src,
3218            offset,
3219            JumpType::Eq,
3220            JumpWidth::W32,
3221            |x, y| {
3222                comp32(x, y, |x, y| {
3223                    // x == y
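                    // Equality is certain only when both ranges are the same singleton;
                    // disjoint ranges prove the operands differ.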
3224                    if x.min == x.max && x.min == y.min && x.min == y.max {
3225                        return Some(true);
3226                    }
3227                    if x.max < y.min || y.max < x.min {
3228                        return Some(false);
3229                    }
3230                    None
3231                })
3232            },
3233        )
3234    }
3235    fn jeq64<'a>(
3236        &mut self,
3237        context: &mut Self::Context<'a>,
3238        dst: Register,
3239        src: Source,
3240        offset: i16,
3241    ) -> Result<(), String> {
3242        self.conditional_jump(
3243            "jeq",
3244            context,
3245            dst,
3246            src,
3247            offset,
3248            JumpType::Eq,
3249            JumpWidth::W64,
3250            |x, y| {
3251                comp64(x, y, |x, y| {
3252                    // x == y
3253                    if x.min == x.max && x.min == y.min && x.min == y.max {
3254                        return Some(true);
3255                    }
3256                    if x.max < y.min || y.max < x.min {
3257                        return Some(false);
3258                    }
3259                    None
3260                })
3261            },
3262        )
3263    }
3264    fn jne<'a>(
3265        &mut self,
3266        context: &mut Self::Context<'a>,
3267        dst: Register,
3268        src: Source,
3269        offset: i16,
3270    ) -> Result<(), String> {
3271        self.conditional_jump(
3272            "jne32",
3273            context,
3274            dst,
3275            src,
3276            offset,
3277            JumpType::Ne,
3278            JumpWidth::W32,
3279            |x, y| {
3280                comp32(x, y, |x, y| {
3281                    // x != y
3282                    if x.min == x.max && x.min == y.min && x.min == y.max {
3283                        return Some(false);
3284                    }
3285                    if x.max < y.min || y.max < x.min {
3286                        return Some(true);
3287                    }
3288                    None
3289                })
3290            },
3291        )
3292    }
3293    fn jne64<'a>(
3294        &mut self,
3295        context: &mut Self::Context<'a>,
3296        dst: Register,
3297        src: Source,
3298        offset: i16,
3299    ) -> Result<(), String> {
3300        self.conditional_jump(
3301            "jne",
3302            context,
3303            dst,
3304            src,
3305            offset,
3306            JumpType::Ne,
3307            JumpWidth::W64,
3308            |x, y| {
3309                comp64(x, y, |x, y| {
3310                    // x != y
3311                    if x.min == x.max && x.min == y.min && x.min == y.max {
3312                        return Some(false);
3313                    }
3314                    if x.max < y.min || y.max < x.min {
3315                        return Some(true);
3316                    }
3317                    None
3318                })
3319            },
3320        )
3321    }
3322    fn jge<'a>(
3323        &mut self,
3324        context: &mut Self::Context<'a>,
3325        dst: Register,
3326        src: Source,
3327        offset: i16,
3328    ) -> Result<(), String> {
3329        self.conditional_jump(
3330            "jge32",
3331            context,
3332            dst,
3333            src,
3334            offset,
3335            JumpType::Ge,
3336            JumpWidth::W32,
3337            |x, y| {
3338                comp32(x, y, |x, y| {
3339                    // x >= y
3340                    if x.min >= y.max {
3341                        return Some(true);
3342                    }
3343                    if y.min > x.max {
3344                        return Some(false);
3345                    }
3346                    None
3347                })
3348            },
3349        )
3350    }
3351    fn jge64<'a>(
3352        &mut self,
3353        context: &mut Self::Context<'a>,
3354        dst: Register,
3355        src: Source,
3356        offset: i16,
3357    ) -> Result<(), String> {
3358        self.conditional_jump(
3359            "jge",
3360            context,
3361            dst,
3362            src,
3363            offset,
3364            JumpType::Ge,
3365            JumpWidth::W64,
3366            |x, y| {
3367                comp64(x, y, |x, y| {
3368                    // x >= y
3369                    if x.min >= y.max {
3370                        return Some(true);
3371                    }
3372                    if y.min > x.max {
3373                        return Some(false);
3374                    }
3375                    None
3376                })
3377            },
3378        )
3379    }
3380    fn jgt<'a>(
3381        &mut self,
3382        context: &mut Self::Context<'a>,
3383        dst: Register,
3384        src: Source,
3385        offset: i16,
3386    ) -> Result<(), String> {
3387        self.conditional_jump(
3388            "jgt32",
3389            context,
3390            dst,
3391            src,
3392            offset,
3393            JumpType::Gt,
3394            JumpWidth::W32,
3395            |x, y| {
3396                comp32(x, y, |x, y| {
3397                    // x > y
3398                    if x.min > y.max {
3399                        return Some(true);
3400                    }
3401                    if y.min >= x.max {
3402                        return Some(false);
3403                    }
3404                    None
3405                })
3406            },
3407        )
3408    }
3409    fn jgt64<'a>(
3410        &mut self,
3411        context: &mut Self::Context<'a>,
3412        dst: Register,
3413        src: Source,
3414        offset: i16,
3415    ) -> Result<(), String> {
3416        self.conditional_jump(
3417            "jgt",
3418            context,
3419            dst,
3420            src,
3421            offset,
3422            JumpType::Gt,
3423            JumpWidth::W64,
3424            |x, y| {
3425                comp64(x, y, |x, y| {
3426                    // x > y
3427                    if x.min > y.max {
3428                        return Some(true);
3429                    }
3430                    if y.min >= x.max {
3431                        return Some(false);
3432                    }
3433                    None
3434                })
3435            },
3436        )
3437    }
3438    fn jle<'a>(
3439        &mut self,
3440        context: &mut Self::Context<'a>,
3441        dst: Register,
3442        src: Source,
3443        offset: i16,
3444    ) -> Result<(), String> {
3445        self.conditional_jump(
3446            "jle32",
3447            context,
3448            dst,
3449            src,
3450            offset,
3451            JumpType::Le,
3452            JumpWidth::W32,
3453            |x, y| {
3454                comp32(x, y, |x, y| {
3455                    // x <= y
3456                    if x.max <= y.min {
3457                        return Some(true);
3458                    }
3459                    if y.max < x.min {
3460                        return Some(false);
3461                    }
3462                    None
3463                })
3464            },
3465        )
3466    }
3467    fn jle64<'a>(
3468        &mut self,
3469        context: &mut Self::Context<'a>,
3470        dst: Register,
3471        src: Source,
3472        offset: i16,
3473    ) -> Result<(), String> {
3474        self.conditional_jump(
3475            "jle",
3476            context,
3477            dst,
3478            src,
3479            offset,
3480            JumpType::Le,
3481            JumpWidth::W64,
3482            |x, y| {
3483                comp64(x, y, |x, y| {
3484                    // x <= y
3485                    if x.max <= y.min {
3486                        return Some(true);
3487                    }
3488                    if y.max < x.min {
3489                        return Some(false);
3490                    }
3491                    None
3492                })
3493            },
3494        )
3495    }
3496    fn jlt<'a>(
3497        &mut self,
3498        context: &mut Self::Context<'a>,
3499        dst: Register,
3500        src: Source,
3501        offset: i16,
3502    ) -> Result<(), String> {
3503        self.conditional_jump(
3504            "jlt32",
3505            context,
3506            dst,
3507            src,
3508            offset,
3509            JumpType::Lt,
3510            JumpWidth::W32,
3511            |x, y| {
3512                comp32(x, y, |x, y| {
3513                    // x < y
3514                    if x.max < y.min {
3515                        return Some(true);
3516                    }
3517                    if y.max <= x.min {
3518                        return Some(false);
3519                    }
3520                    None
3521                })
3522            },
3523        )
3524    }
3525    fn jlt64<'a>(
3526        &mut self,
3527        context: &mut Self::Context<'a>,
3528        dst: Register,
3529        src: Source,
3530        offset: i16,
3531    ) -> Result<(), String> {
3532        self.conditional_jump(
3533            "jlt",
3534            context,
3535            dst,
3536            src,
3537            offset,
3538            JumpType::Lt,
3539            JumpWidth::W64,
3540            |x, y| {
3541                comp64(x, y, |x, y| {
3542                    // x < y
3543                    if x.max < y.min {
3544                        return Some(true);
3545                    }
3546                    if y.max <= x.min {
3547                        return Some(false);
3548                    }
3549                    None
3550                })
3551            },
3552        )
3553    }
3554    fn jsge<'a>(
3555        &mut self,
3556        context: &mut Self::Context<'a>,
3557        dst: Register,
3558        src: Source,
3559        offset: i16,
3560    ) -> Result<(), String> {
3561        self.conditional_jump(
3562            "jsge32",
3563            context,
3564            dst,
3565            src,
3566            offset,
3567            JumpType::LooseComparaison,
3568            JumpWidth::W32,
3569            |x, y| scomp32(x, y, |x, y| x >= y),
3570        )
3571    }
3572    fn jsge64<'a>(
3573        &mut self,
3574        context: &mut Self::Context<'a>,
3575        dst: Register,
3576        src: Source,
3577        offset: i16,
3578    ) -> Result<(), String> {
3579        self.conditional_jump(
3580            "jsge",
3581            context,
3582            dst,
3583            src,
3584            offset,
3585            JumpType::LooseComparaison,
3586            JumpWidth::W64,
3587            |x, y| scomp64(x, y, |x, y| x >= y),
3588        )
3589    }
3590    fn jsgt<'a>(
3591        &mut self,
3592        context: &mut Self::Context<'a>,
3593        dst: Register,
3594        src: Source,
3595        offset: i16,
3596    ) -> Result<(), String> {
3597        self.conditional_jump(
3598            "jsgt32",
3599            context,
3600            dst,
3601            src,
3602            offset,
3603            JumpType::StrictComparaison,
3604            JumpWidth::W32,
3605            |x, y| scomp32(x, y, |x, y| x > y),
3606        )
3607    }
3608    fn jsgt64<'a>(
3609        &mut self,
3610        context: &mut Self::Context<'a>,
3611        dst: Register,
3612        src: Source,
3613        offset: i16,
3614    ) -> Result<(), String> {
3615        self.conditional_jump(
3616            "jsgt",
3617            context,
3618            dst,
3619            src,
3620            offset,
3621            JumpType::StrictComparaison,
3622            JumpWidth::W64,
3623            |x, y| scomp64(x, y, |x, y| x > y),
3624        )
3625    }
3626    fn jsle<'a>(
3627        &mut self,
3628        context: &mut Self::Context<'a>,
3629        dst: Register,
3630        src: Source,
3631        offset: i16,
3632    ) -> Result<(), String> {
3633        self.conditional_jump(
3634            "jsle32",
3635            context,
3636            dst,
3637            src,
3638            offset,
3639            JumpType::LooseComparaison,
3640            JumpWidth::W32,
3641            |x, y| scomp32(x, y, |x, y| x <= y),
3642        )
3643    }
3644    fn jsle64<'a>(
3645        &mut self,
3646        context: &mut Self::Context<'a>,
3647        dst: Register,
3648        src: Source,
3649        offset: i16,
3650    ) -> Result<(), String> {
3651        self.conditional_jump(
3652            "jsle",
3653            context,
3654            dst,
3655            src,
3656            offset,
3657            JumpType::LooseComparaison,
3658            JumpWidth::W64,
3659            |x, y| scomp64(x, y, |x, y| x <= y),
3660        )
3661    }
3662    fn jslt<'a>(
3663        &mut self,
3664        context: &mut Self::Context<'a>,
3665        dst: Register,
3666        src: Source,
3667        offset: i16,
3668    ) -> Result<(), String> {
3669        self.conditional_jump(
3670            "jslt32",
3671            context,
3672            dst,
3673            src,
3674            offset,
3675            JumpType::StrictComparaison,
3676            JumpWidth::W32,
3677            |x, y| scomp32(x, y, |x, y| x < y),
3678        )
3679    }
3680    fn jslt64<'a>(
3681        &mut self,
3682        context: &mut Self::Context<'a>,
3683        dst: Register,
3684        src: Source,
3685        offset: i16,
3686    ) -> Result<(), String> {
3687        self.conditional_jump(
3688            "jslt",
3689            context,
3690            dst,
3691            src,
3692            offset,
3693            JumpType::StrictComparaison,
3694            JumpWidth::W64,
3695            |x, y| scomp64(x, y, |x, y| x < y),
3696        )
3697    }
3698    fn jset<'a>(
3699        &mut self,
3700        context: &mut Self::Context<'a>,
3701        dst: Register,
3702        src: Source,
3703        offset: i16,
3704    ) -> Result<(), String> {
3705        self.conditional_jump(
3706            "jset32",
3707            context,
3708            dst,
3709            src,
3710            offset,
3711            JumpType::Unknown,
3712            JumpWidth::W32,
3713            |x, y| {
3714                comp32(x, y, |x, y| {
3715                    // x & y != 0
3716                    if x.min != x.max || y.min != y.max {
3717                        return None;
3718                    }
3719                    Some(x.min & y.min != 0)
3720                })
3721            },
3722        )
3723    }
3724    fn jset64<'a>(
3725        &mut self,
3726        context: &mut Self::Context<'a>,
3727        dst: Register,
3728        src: Source,
3729        offset: i16,
3730    ) -> Result<(), String> {
3731        self.conditional_jump(
3732            "jset",
3733            context,
3734            dst,
3735            src,
3736            offset,
3737            JumpType::Unknown,
3738            JumpWidth::W64,
3739            |x, y| {
3740                comp64(x, y, |x, y| {
3741                    // x & y != 0
3742                    if x.min != x.max || y.min != y.max {
3743                        return None;
3744                    }
3745                    Some(x.min & y.min != 0)
3746                })
3747            },
3748        )
3749    }
3750
3751    fn atomic_add<'a>(
3752        &mut self,
3753        context: &mut Self::Context<'a>,
3754        fetch: bool,
3755        dst: Register,
3756        offset: i16,
3757        src: Register,
3758    ) -> Result<(), String> {
3759        self.atomic_operation(
3760            "add32",
3761            context,
3762            DataWidth::U32,
3763            fetch,
3764            dst,
3765            offset,
3766            src,
3767            AluType::Add,
3768            |x, y| alu32(x, y, |x, y| x + y),
3769        )
3770    }
3771
3772    fn atomic_add64<'a>(
3773        &mut self,
3774        context: &mut Self::Context<'a>,
3775        fetch: bool,
3776        dst: Register,
3777        offset: i16,
3778        src: Register,
3779    ) -> Result<(), String> {
3780        self.atomic_operation(
3781            "add",
3782            context,
3783            DataWidth::U64,
3784            fetch,
3785            dst,
3786            offset,
3787            src,
3788            AluType::Add,
3789            |x, y| x + y,
3790        )
3791    }
3792
3793    fn atomic_and<'a>(
3794        &mut self,
3795        context: &mut Self::Context<'a>,
3796        fetch: bool,
3797        dst: Register,
3798        offset: i16,
3799        src: Register,
3800    ) -> Result<(), String> {
3801        self.atomic_operation(
3802            "and32",
3803            context,
3804            DataWidth::U32,
3805            fetch,
3806            dst,
3807            offset,
3808            src,
3809            AluType::Plain,
3810            |x, y| alu32(x, y, |x, y| x & y),
3811        )
3812    }
3813
3814    fn atomic_and64<'a>(
3815        &mut self,
3816        context: &mut Self::Context<'a>,
3817        fetch: bool,
3818        dst: Register,
3819        offset: i16,
3820        src: Register,
3821    ) -> Result<(), String> {
3822        self.atomic_operation(
3823            "and",
3824            context,
3825            DataWidth::U64,
3826            fetch,
3827            dst,
3828            offset,
3829            src,
3830            AluType::Plain,
3831            |x, y| x & y,
3832        )
3833    }
3834
3835    fn atomic_or<'a>(
3836        &mut self,
3837        context: &mut Self::Context<'a>,
3838        fetch: bool,
3839        dst: Register,
3840        offset: i16,
3841        src: Register,
3842    ) -> Result<(), String> {
3843        self.atomic_operation(
3844            "or32",
3845            context,
3846            DataWidth::U32,
3847            fetch,
3848            dst,
3849            offset,
3850            src,
3851            AluType::Plain,
3852            |x, y| alu32(x, y, |x, y| x | y),
3853        )
3854    }
3855
3856    fn atomic_or64<'a>(
3857        &mut self,
3858        context: &mut Self::Context<'a>,
3859        fetch: bool,
3860        dst: Register,
3861        offset: i16,
3862        src: Register,
3863    ) -> Result<(), String> {
3864        self.atomic_operation(
3865            "or",
3866            context,
3867            DataWidth::U64,
3868            fetch,
3869            dst,
3870            offset,
3871            src,
3872            AluType::Plain,
3873            |x, y| x | y,
3874        )
3875    }
3876
3877    fn atomic_xor<'a>(
3878        &mut self,
3879        context: &mut Self::Context<'a>,
3880        fetch: bool,
3881        dst: Register,
3882        offset: i16,
3883        src: Register,
3884    ) -> Result<(), String> {
3885        self.atomic_operation(
3886            "xor32",
3887            context,
3888            DataWidth::U32,
3889            fetch,
3890            dst,
3891            offset,
3892            src,
3893            AluType::Plain,
3894            |x, y| alu32(x, y, |x, y| x ^ y),
3895        )
3896    }
3897
3898    fn atomic_xor64<'a>(
3899        &mut self,
3900        context: &mut Self::Context<'a>,
3901        fetch: bool,
3902        dst: Register,
3903        offset: i16,
3904        src: Register,
3905    ) -> Result<(), String> {
3906        self.atomic_operation(
3907            "xor",
3908            context,
3909            DataWidth::U64,
3910            fetch,
3911            dst,
3912            offset,
3913            src,
3914            AluType::Plain,
3915            |x, y| x ^ y,
3916        )
3917    }
3918
3919    fn atomic_xchg<'a>(
3920        &mut self,
3921        context: &mut Self::Context<'a>,
3922        fetch: bool,
3923        dst: Register,
3924        offset: i16,
3925        src: Register,
3926    ) -> Result<(), String> {
3927        self.atomic_operation(
3928            "xchg32",
3929            context,
3930            DataWidth::U32,
3931            fetch,
3932            dst,
3933            offset,
3934            src,
3935            AluType::Plain,
3936            |_, x| x,
3937        )
3938    }
3939
3940    fn atomic_xchg64<'a>(
3941        &mut self,
3942        context: &mut Self::Context<'a>,
3943        fetch: bool,
3944        dst: Register,
3945        offset: i16,
3946        src: Register,
3947    ) -> Result<(), String> {
3948        self.raw_atomic_operation(
3949            "xchg",
3950            context,
3951            DataWidth::U64,
3952            fetch,
3953            dst,
3954            offset,
3955            src,
3956            |_, _, x| Ok(x),
3957        )
3958    }
3959
3960    fn atomic_cmpxchg<'a>(
3961        &mut self,
3962        context: &mut Self::Context<'a>,
3963        dst: Register,
3964        offset: i16,
3965        src: Register,
3966    ) -> Result<(), String> {
3967        self.raw_atomic_cmpxchg("cmpxchg32", context, dst, offset, src, JumpWidth::W32, |x, y| {
3968            comp32(x, y, |x, y| {
3969                // x == y
3970                if x.min == x.max && x.min == y.min && x.min == y.max {
3971                    return Some(true);
3972                }
3973                if x.max < y.min || y.max < x.min {
3974                    return Some(false);
3975                }
3976                None
3977            })
3978        })
3979    }
3980
3981    fn atomic_cmpxchg64<'a>(
3982        &mut self,
3983        context: &mut Self::Context<'a>,
3984        dst: Register,
3985        offset: i16,
3986        src: Register,
3987    ) -> Result<(), String> {
3988        self.raw_atomic_cmpxchg("cmpxchg", context, dst, offset, src, JumpWidth::W64, |x, y| {
3989            comp64(x, y, |x, y| {
3990                // x == y
3991                if x.min == x.max && x.min == y.min && x.min == y.max {
3992                    return Some(true);
3993                }
3994                if x.max < y.min || y.max < x.min {
3995                    return Some(false);
3996                }
3997                None
3998            })
3999        })
4000    }
4001
4002    fn load<'a>(
4003        &mut self,
4004        context: &mut Self::Context<'a>,
4005        dst: Register,
4006        offset: i16,
4007        src: Register,
4008        width: DataWidth,
4009    ) -> Result<(), String> {
4010        bpf_log!(
4011            self,
4012            context,
4013            "ldx{} {}, [{}{}]",
4014            width.str(),
4015            display_register(dst),
4016            display_register(src),
4017            print_offset(offset)
4018        );
4019        let addr = self.reg(src)?;
4020        let loaded_type = self.load_memory(context, &addr, Field::new(offset, width))?;
4021        let mut next = self.next()?;
4022        next.set_reg(dst, loaded_type)?;
4023        context.states.push(next);
4024        Ok(())
4025    }
4026
4027    fn load64<'a>(
4028        &mut self,
4029        context: &mut Self::Context<'a>,
4030        dst: Register,
4031        src: u8,
4032        lower: u32,
4033    ) -> Result<(), String> {
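        // lddw is a double-wide instruction: the low 32 bits of the immediate are encoded in
        // this instruction and the high 32 bits in the next one, whose register fields must
        // be zero.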
4034        let Some(next_instruction) = context.code.get(self.pc + 1) else {
4035            return Err(format!("incomplete lddw"));
4036        };
4037        if next_instruction.src_reg() != 0 || next_instruction.dst_reg() != 0 {
4038            return Err("invalid lddw".to_string());
4039        }
4040
4041        let value = match src {
4042            0 => {
4043                let value = (lower as u64) | (((next_instruction.imm() as u32) as u64) << 32);
4044                bpf_log!(self, context, "lddw {}, 0x{:x}", display_register(dst), value);
4045                Type::from(value)
4046            }
4047            BPF_PSEUDO_MAP_IDX => {
4048                let map_index = lower;
4049                bpf_log!(
4050                    self,
4051                    context,
4052                    "lddw {}, map_by_index({:x})",
4053                    display_register(dst),
4054                    map_index
4055                );
4056                context
4057                    .calling_context
4058                    .maps
4059                    .get(usize::try_from(map_index).unwrap())
4060                    .map(|schema| Type::ConstPtrToMap { id: map_index.into(), schema: *schema })
4061                    .ok_or_else(|| format!("lddw with invalid map index: {}", map_index))?
4062            }
4063            BPF_PSEUDO_MAP_IDX_VALUE => {
4064                let map_index = lower;
4065                let offset = next_instruction.imm();
4066                bpf_log!(
4067                    self,
4068                    context,
4069                    "lddw {}, map_value_by_index({:x})+{offset}",
4070                    display_register(dst),
4071                    map_index
4072                );
4073                let id = context.next_id();
4074                let map_schema = context
4075                    .calling_context
4076                    .maps
4077                    .get(usize::try_from(map_index).unwrap())
4078                    .ok_or_else(|| format!("lddw with invalid map index: {}", map_index))?;
4079
4080                if map_schema.map_type != bpf_map_type_BPF_MAP_TYPE_ARRAY {
4081                    return Err(format!(
4082                        "Invalid map type at index {map_index} for lddw. Expecting array."
4083                    ));
4084                }
4085                if map_schema.max_entries == 0 {
4086                    return Err(format!("Array has no entry."));
4087                }
4088
4089                Type::PtrToMemory {
4090                    id: MemoryId::from(id),
4091                    offset: offset.into(),
4092                    buffer_size: map_schema.value_size.into(),
4093                }
4094            }
4095            _ => {
4096                return Err("invalid lddw".to_string());
4097            }
4098        };
4099
4100        let parent = Some(Arc::new(self.clone()));
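        // Skip over the second half of the double-wide instruction.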
4101        let mut next = self.jump_with_offset(1, parent)?;
4102        next.set_reg(dst, value.into())?;
4103
4104        context.states.push(next);
4105        Ok(())
4106    }
4107
4108    fn load_from_packet<'a>(
4109        &mut self,
4110        context: &mut Self::Context<'a>,
4111        dst: Register,
4112        src: Register,
4113        offset: i32,
4114        register_offset: Option<Register>,
4115        width: DataWidth,
4116    ) -> Result<(), String> {
4117        bpf_log!(
4118            self,
4119            context,
4120            "ldp{} {}{}",
4121            width.str(),
4122            register_offset.map(display_register).unwrap_or_default(),
4123            print_offset(offset)
4124        );
4125
4126        // Verify that `src` refers to a packet.
4127        let src_type = self.reg(src)?;
4128        let src_is_packet = match &context.calling_context.packet_type {
4129            Some(packet_type) => src_type == *packet_type,
4130            None => false,
4131        };
4132        if !src_is_packet {
4133            return Err(format!("R{} is not a packet", src));
4134        }
4135
4136        if let Some(reg) = register_offset {
4137            let reg = self.reg(reg)?;
4138            if !reg.is_written_scalar() {
4139                return Err("access to unwritten offset".to_string());
4140            }
4141        }
4142        // Handle the case where the load succeeds.
4143        let mut next = self.next()?;
4144        next.set_reg(dst, Type::UNKNOWN_SCALAR)?;
4145        for i in 1..=5 {
4146            next.set_reg(i, Type::default())?;
4147        }
4148        context.states.push(next);
4149        // Handle the case where the load fails.
4150        if !self.reg(0)?.is_written_scalar() {
4151            return Err("register 0 is incorrect at exit time".to_string());
4152        }
4153        if !self.resources.is_empty() {
4154            return Err("some resources have not been released at exit time".to_string());
4155        }
4156        let this = self.clone();
4157        this.terminate(context)?;
4158        // Nothing else to do: on failure the program terminates with the scalar value in register 0.
4159        Ok(())
4160    }
4161
4162    fn store<'a>(
4163        &mut self,
4164        context: &mut Self::Context<'a>,
4165        dst: Register,
4166        offset: i16,
4167        src: Source,
4168        width: DataWidth,
4169    ) -> Result<(), String> {
4170        let value = match src {
4171            Source::Reg(r) => {
4172                bpf_log!(
4173                    self,
4174                    context,
4175                    "stx{} [{}{}], {}",
4176                    width.str(),
4177                    display_register(dst),
4178                    print_offset(offset),
4179                    display_register(r),
4180                );
4181                self.reg(r)?
4182            }
4183            Source::Value(v) => {
4184                bpf_log!(
4185                    self,
4186                    context,
4187                    "st{} [{}{}], 0x{:x}",
4188                    width.str(),
4189                    display_register(dst),
4190                    print_offset(offset),
4191                    v,
4192                );
4193                Type::from(v & Type::mask(width))
4194            }
4195        };
4196        let mut next = self.next()?;
4197        let addr = self.reg(dst)?;
4198        next.store_memory(context, &addr, Field::new(offset, width), value)?;
4199        context.states.push(next);
4200        Ok(())
4201    }
4202}
4203
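/// Applies a 32-bit ALU operation: both operands are truncated to their low 32 bits and the
/// result is zero-extended back to 64 bits.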
4204fn alu32(
4205    x: ScalarValueData,
4206    y: ScalarValueData,
4207    op: impl FnOnce(U32ScalarValueData, U32ScalarValueData) -> U32ScalarValueData,
4208) -> ScalarValueData {
4209    op(U32ScalarValueData::from(x), U32ScalarValueData::from(y)).into()
4210}
4211
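/// Evaluates a 64-bit comparison over the unsigned ranges of the operands. Returns `Err(())`
/// if either operand may be uninitialized, and `Ok(None)` when the ranges are not sufficient
/// to decide the comparison.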
4212fn comp64(
4213    x: ScalarValueData,
4214    y: ScalarValueData,
4215    op: impl FnOnce(U64Range, U64Range) -> Option<bool>,
4216) -> Result<Option<bool>, ()> {
4217    if !x.is_fully_initialized() || !y.is_fully_initialized() {
4218        return Err(());
4219    }
4220    Ok(op(x.urange, y.urange))
4221}
4222
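/// 32-bit variant of `comp64`, operating on the low halves of the operands.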
4223fn comp32(
4224    x: ScalarValueData,
4225    y: ScalarValueData,
4226    op: impl FnOnce(U32Range, U32Range) -> Option<bool>,
4227) -> Result<Option<bool>, ()> {
4228    let x = U32ScalarValueData::from(x);
4229    let y = U32ScalarValueData::from(y);
4230    if !x.is_fully_initialized() || !y.is_fully_initialized() {
4231        return Err(());
4232    }
4233    Ok(op(x.urange, y.urange))
4234}
4235
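/// Evaluates a signed 64-bit comparison. Returns `Err(())` if either operand may be
/// uninitialized, and `Ok(None)` unless both operands are fully known.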
4236fn scomp64(
4237    x: ScalarValueData,
4238    y: ScalarValueData,
4239    op: impl FnOnce(i64, i64) -> bool,
4240) -> Result<Option<bool>, ()> {
4241    if !x.is_fully_initialized() || !y.is_fully_initialized() {
4242        return Err(());
4243    }
4244    if !x.is_known() || !y.is_known() {
4245        return Ok(None);
4246    }
4247    Ok(Some(op(x.value as i64, y.value as i64)))
4248}
4249
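/// 32-bit variant of `scomp64`, operating on the low halves of the operands.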
4250fn scomp32(
4251    x: ScalarValueData,
4252    y: ScalarValueData,
4253    op: impl FnOnce(i32, i32) -> bool,
4254) -> Result<Option<bool>, ()> {
4255    let x = U32ScalarValueData::from(x);
4256    let y = U32ScalarValueData::from(y);
4257    if !x.is_fully_initialized() || !y.is_fully_initialized() {
4258        return Err(());
4259    }
4260    if !x.is_known() || !y.is_known() {
4261        return Ok(None);
4262    }
4263    Ok(Some(op(x.value as i32, y.value as i32)))
4264}
4265
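/// Formats an offset with an explicit sign, or as an empty string when it is 0.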
4266fn print_offset<T: Into<i32>>(offset: T) -> String {
4267    let offset: i32 = offset.into();
4268    if offset == 0 {
4269        String::new()
4270    } else if offset > 0 {
4271        format!("+{offset}")
4272    } else {
4273        format!("{offset}")
4274    }
4275}
4276
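/// Applies `f` to the scalar value wrapped by a stack offset.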
4277fn run_on_stack_offset<F>(v: StackOffset, f: F) -> StackOffset
4278where
4279    F: FnOnce(ScalarValueData) -> ScalarValueData,
4280{
4281    StackOffset(f(v.reg()))
4282}
4283
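/// Logs `msg` and returns it as an `EbpfError::ProgramVerifyError`.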
4284fn error_and_log<T>(
4285    logger: &mut dyn VerifierLogger,
4286    msg: impl std::string::ToString,
4287) -> Result<T, EbpfError> {
4288    let msg = msg.to_string();
4289    logger.log(msg.as_bytes());
4290    Err(EbpfError::ProgramVerifyError(msg))
4291}
4292
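/// Combines two partial orderings: identical orderings are kept, `Equal` is neutral, and any
/// other combination is incomparable.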
4293fn associate_orderings(o1: Ordering, o2: Ordering) -> Option<Ordering> {
4294    match (o1, o2) {
4295        (o1, o2) if o1 == o2 => Some(o1),
4296        (o, Ordering::Equal) | (Ordering::Equal, o) => Some(o),
4297        _ => None,
4298    }
4299}
4300
4301#[cfg(test)]
4302mod tests {
4303    use super::*;
4304    use std::collections::BTreeSet;
4305    use test_util::{assert_geq, assert_leq};
4306
4307    #[test]
4308    fn test_type_ordering() {
4309        let t0 = Type::from(0);
4310        let t1 = Type::from(1);
4311        let random = Type::AliasParameter { parameter_index: 8 };
4312        let unknown_written = Type::UNKNOWN_SCALAR;
4313        let unwritten = Type::default();
4314
4315        assert_eq!(t0.partial_cmp(&t0), Some(Ordering::Equal));
4316        assert_eq!(t0.partial_cmp(&t1), None);
4317        assert_eq!(t0.partial_cmp(&random), None);
4318        assert_eq!(t0.partial_cmp(&unknown_written), Some(Ordering::Less));
4319        assert_eq!(t0.partial_cmp(&unwritten), Some(Ordering::Less));
4320
4321        assert_eq!(t1.partial_cmp(&t0), None);
4322        assert_eq!(t1.partial_cmp(&t1), Some(Ordering::Equal));
4323        assert_eq!(t1.partial_cmp(&random), None);
4324        assert_eq!(t1.partial_cmp(&unknown_written), Some(Ordering::Less));
4325        assert_eq!(t1.partial_cmp(&unwritten), Some(Ordering::Less));
4326
4327        assert_eq!(random.partial_cmp(&t0), None);
4328        assert_eq!(random.partial_cmp(&t1), None);
4329        assert_eq!(random.partial_cmp(&random), Some(Ordering::Equal));
4330        assert_eq!(random.partial_cmp(&unknown_written), None);
4331        assert_eq!(random.partial_cmp(&unwritten), Some(Ordering::Less));
4332
4333        assert_eq!(unknown_written.partial_cmp(&t0), Some(Ordering::Greater));
4334        assert_eq!(unknown_written.partial_cmp(&t1), Some(Ordering::Greater));
4335        assert_eq!(unknown_written.partial_cmp(&random), None);
4336        assert_eq!(unknown_written.partial_cmp(&unknown_written), Some(Ordering::Equal));
4337        assert_eq!(unknown_written.partial_cmp(&unwritten), Some(Ordering::Less));
4338
4339        assert_eq!(unwritten.partial_cmp(&t0), Some(Ordering::Greater));
4340        assert_eq!(unwritten.partial_cmp(&t1), Some(Ordering::Greater));
4341        assert_eq!(unwritten.partial_cmp(&random), Some(Ordering::Greater));
4342        assert_eq!(unwritten.partial_cmp(&unknown_written), Some(Ordering::Greater));
4343        assert_eq!(unwritten.partial_cmp(&unwritten), Some(Ordering::Equal));
4344    }
4345
4346    #[test]
4347    fn test_stack_ordering() {
4348        let mut s1 = Stack::default();
4349        let mut s2 = Stack::default();
4350
4351        assert_eq!(s1.partial_cmp(&s2), Some(Ordering::Equal));
4352        s1.set(0, 0.into());
4353        assert_eq!(s1.partial_cmp(&s2), Some(Ordering::Less));
4354        assert_eq!(s2.partial_cmp(&s1), Some(Ordering::Greater));
4355        s2.set(1, 1.into());
4356        assert_eq!(s1.partial_cmp(&s2), None);
4357        assert_eq!(s2.partial_cmp(&s1), None);
4358    }
4359
4360    #[test]
4361    fn test_context_ordering() {
4362        let mut c1 = ComputationContext::default();
4363        let mut c2 = ComputationContext::default();
4364
4365        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Equal));
4366
4367        c1.array_bounds.insert(1.into(), 5);
4368        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Less));
4369        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Greater));
4370
4371        c2.array_bounds.insert(1.into(), 7);
4372        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
4373        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));
4374
4375        c1.array_bounds.insert(2.into(), 9);
4376        assert_eq!(c1.partial_cmp(&c2), None);
4377        assert_eq!(c2.partial_cmp(&c1), None);
4378
4379        c2.array_bounds.insert(2.into(), 9);
4380        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
4381        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));
4382
4383        c2.array_bounds.insert(3.into(), 12);
4384        assert_eq!(c1.partial_cmp(&c2), Some(Ordering::Greater));
4385        assert_eq!(c2.partial_cmp(&c1), Some(Ordering::Less));
4386
4387        c1.pc = 8;
4388        assert_eq!(c1.partial_cmp(&c2), None);
4389        assert_eq!(c2.partial_cmp(&c1), None);
4390    }
4391
4392    #[test]
4393    fn test_stack_access() {
4394        let mut s = Stack::default();
4395
4396        // Store data in the range [8, 26) and verify that `read_data_ptr()` fails for any
4397        // reads outside of that range.
4398        assert!(s.store(StackOffset(8.into()), Type::UNKNOWN_SCALAR, DataWidth::U64).is_ok());
4399        assert!(s.store(StackOffset(16.into()), Type::UNKNOWN_SCALAR, DataWidth::U64).is_ok());
4400        assert!(s.store(StackOffset(24.into()), Type::UNKNOWN_SCALAR, DataWidth::U16).is_ok());
4401
4402        for offset in 0..32 {
4403            for end in (offset + 1)..32 {
4404                assert_eq!(
4405                    s.read_data_ptr(2, StackOffset(offset.into()), (end - offset) as u64).is_ok(),
4406                    offset >= 8 && end <= 26
4407                );
4408            }
4409        }
4410
4411        // Verify that overflows are handled properly.
4412        assert!(s.read_data_ptr(2, StackOffset(12.into()), u64::MAX - 2).is_err());
4413    }
4414
4415    #[test]
4416    fn test_compute_range_for_bytes_swap() {
4417        // Build a list of interesting values. Interesting values are all possible combinations
4418        // of bytes that are either 0, the maximum value, or an intermediate value.
4419        let mut values = BTreeSet::<u64>::default();
4420        for v1 in &[0x00, 0x1, u64::MAX] {
4421            for v2 in &[0x00, 0x1, u64::MAX] {
4422                for v3 in &[0x00, 0x1, u64::MAX] {
4423                    values.insert(U64Range::assemble_slices((*v1, *v2, *v3), 1, 1));
4424                }
4425            }
4426        }
4427        // Replace the second byte of `old` with the first byte of `new` and return the result.
4428        let store = |old: u64, new: u64| (old & !0xff00) | ((new & 0xff) << 8);
4429
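        // Whatever enclosing ranges are chosen for the old and new values, the computed range
        // must contain the value actually produced by the store.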
4430        for old in &values {
4431            for new in &values {
4432                let s = store(*old, *new);
4433                for min_old in values.iter().filter(|v| *v <= old) {
4434                    for min_new in values.iter().filter(|v| *v <= new) {
4435                        for max_old in values.iter().filter(|v| *v >= old) {
4436                            for max_new in values.iter().filter(|v| *v >= new) {
4437                                let range = U64Range::compute_range_for_bytes_swap(
4438                                    U64Range::new(*min_old, *max_old),
4439                                    U64Range::new(*min_new, *max_new),
4440                                    1,
4441                                    0,
4442                                    1,
4443                                );
4444                                assert_leq!(range.min, s);
4445                                assert_geq!(range.max, s);
4446                            }
4447                        }
4448                    }
4449                }
4450            }
4451        }
4452    }
4453}