
// Copyright 2023 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use heapdump_vmo::stack_trace_compression;
use std::ffi::c_void;

use crate::{with_profiler, PerThreadData, Profiler};

const STACK_TRACE_MAXIMUM_DEPTH: usize = 64;
const STACK_TRACE_MAXIMUM_COMPRESSED_SIZE: usize =
    stack_trace_compression::max_compressed_size(STACK_TRACE_MAXIMUM_DEPTH);

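// `__sanitizer_fast_backtrace` is provided by the sanitizer runtime. As used below, it is
// assumed to capture up to `buffer_size` return addresses from the current call stack into
// `buffer`, returning the number of frames actually written.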
extern "C" {
    fn __sanitizer_fast_backtrace(buffer: *mut u64, buffer_size: usize) -> usize;
}

// Like `with_profiler`, but also passes the current timestamp and the compressed call stack.
//
// Note: This function is `inline(always)` so that it doesn't appear in the stack trace.
#[inline(always)]
fn with_profiler_and_call_site(
    f: impl FnOnce(&Profiler, &mut PerThreadData, zx::MonotonicInstant, &[u8]),
) {
    // Collect the timestamp as early as possible.
    let timestamp = zx::MonotonicInstant::get();

    // Collect the stack trace outside of the recursion guard to avoid including its frames in the trace.
    let mut stack_buf = [0; STACK_TRACE_MAXIMUM_DEPTH];
    let stack_len =
        unsafe { __sanitizer_fast_backtrace(stack_buf.as_mut_ptr(), STACK_TRACE_MAXIMUM_DEPTH) };
    let stack = &stack_buf[..stack_len];

    with_profiler(|profiler, thread_data| {
        // Compress the stack trace.
        let mut compressed_stack_buf = [0; STACK_TRACE_MAXIMUM_COMPRESSED_SIZE];
        let compressed_stack_len =
            stack_trace_compression::compress_into(stack, &mut compressed_stack_buf);
        let compressed_stack = &compressed_stack_buf[..compressed_stack_len];

        f(profiler, thread_data, timestamp, compressed_stack)
    })
}

// Called by Scudo after new memory has been allocated by malloc/calloc/...
#[no_mangle]
pub extern "C" fn __scudo_allocate_hook(ptr: *mut c_void, size: usize) {
    with_profiler_and_call_site(|profiler, thread_data, timestamp, compressed_stack_trace| {
        profiler.record_allocation(
            thread_data,
            ptr as u64,
            size as u64,
            compressed_stack_trace,
            timestamp,
        );
    });
}

// Called by Scudo before memory is deallocated by free.
#[no_mangle]
pub extern "C" fn __scudo_deallocate_hook(ptr: *mut c_void) {
    with_profiler(|profiler, thread_data| {
        if !ptr.is_null() {
            profiler.forget_allocation(thread_data, ptr as u64);
        }
    });
}

// Called by Scudo at the beginning of realloc.
#[no_mangle]
pub extern "C" fn __scudo_realloc_deallocate_hook(_old_ptr: *mut c_void) {
    // We don't do anything at this stage. All our work happens in __scudo_realloc_allocate_hook.
}

// Called by Scudo at the end of realloc.
#[no_mangle]
pub extern "C" fn __scudo_realloc_allocate_hook(
    old_ptr: *mut c_void,
    new_ptr: *mut c_void,
    size: usize,
) {
    with_profiler_and_call_site(|profiler, thread_data, timestamp, compressed_stack_trace| {
        // Has the memory block been reallocated in-place?
        if old_ptr == new_ptr {
            profiler.update_allocation(
                thread_data,
                old_ptr as u64,
                size as u64,
                compressed_stack_trace,
                timestamp,
            );
        } else {
            profiler.record_allocation(
                thread_data,
                new_ptr as u64,
                size as u64,
                compressed_stack_trace,
                timestamp,
            );
            profiler.forget_allocation(thread_data, old_ptr as u64);
        }
    });
}
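
// Note (informal): as described in the comments above, Scudo invokes these `__scudo_*_hook`
// functions around malloc/calloc/realloc/free. On a system whose process allocator is Scudo,
// heap allocations made by the process (including ones made through Rust's default global
// allocator, which is expected to call into the C allocator) should therefore flow through
// these hooks and be reported to the profiler.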