// Copyright 2023 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use heapdump_vmo::stack_trace_compression;
use std::ffi::c_void;

use crate::{with_profiler, PerThreadData, Profiler};
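
// Maximum number of frames captured per stack trace; deeper call stacks are truncated.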
const STACK_TRACE_MAXIMUM_DEPTH: usize = 64;
const STACK_TRACE_MAXIMUM_COMPRESSED_SIZE: usize =
    stack_trace_compression::max_compressed_size(STACK_TRACE_MAXIMUM_DEPTH);

extern "C" {
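    // Provided by the sanitizer runtime. As used below, it is assumed to write up to
    // `buffer_size` return addresses into `buffer` and return the number of frames captured.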
    fn __sanitizer_fast_backtrace(buffer: *mut u64, buffer_size: usize) -> usize;
}

// Like `with_profiler`, but also passes the current timestamp and the compressed call stack.
//
// Note: This function is `#[inline(always)]` so that it doesn't appear in the stack trace.
#[inline(always)]
fn with_profiler_and_call_site(
    f: impl FnOnce(&Profiler, &mut PerThreadData, zx::MonotonicInstant, &[u8]),
) {
    // Collect the timestamp as early as possible.
    let timestamp = zx::MonotonicInstant::get();

    // Collect the stack trace outside of the recursion guard, so that the guard's frames are not
    // included in it.
    let mut stack_buf = [0; STACK_TRACE_MAXIMUM_DEPTH];
    let stack_len =
        unsafe { __sanitizer_fast_backtrace(stack_buf.as_mut_ptr(), STACK_TRACE_MAXIMUM_DEPTH) };
    let stack = &stack_buf[..stack_len];

    with_profiler(|profiler, thread_data| {
        // Compress the stack trace.
        let mut compressed_stack_buf = [0; STACK_TRACE_MAXIMUM_COMPRESSED_SIZE];
        let compressed_stack_len =
            stack_trace_compression::compress_into(stack, &mut compressed_stack_buf);
        let compressed_stack = &compressed_stack_buf[..compressed_stack_len];

        f(profiler, thread_data, timestamp, compressed_stack)
    })
}

// Called by Scudo after new memory has been allocated by malloc/calloc/...
#[no_mangle]
pub extern "C" fn __scudo_allocate_hook(ptr: *mut c_void, size: usize) {
    with_profiler_and_call_site(|profiler, thread_data, timestamp, compressed_stack_trace| {
        profiler.record_allocation(
            thread_data,
            ptr as u64,
            size as u64,
            compressed_stack_trace,
            timestamp,
        );
    });
}

// Called by Scudo before memory is deallocated by free.
#[no_mangle]
pub extern "C" fn __scudo_deallocate_hook(ptr: *mut c_void) {
    with_profiler(|profiler, thread_data| {
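        // `free(NULL)` is a valid no-op, in which case there is no allocation to forget.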
        if !ptr.is_null() {
            profiler.forget_allocation(thread_data, ptr as u64);
        }
    });
}

// Called by Scudo at the beginning of realloc.
#[no_mangle]
pub extern "C" fn __scudo_realloc_deallocate_hook(_old_ptr: *mut c_void) {
    // We don't do anything at this stage. All our work happens in __scudo_realloc_allocate_hook.
}

// Called by Scudo at the end of realloc.
#[no_mangle]
pub extern "C" fn __scudo_realloc_allocate_hook(
    old_ptr: *mut c_void,
    new_ptr: *mut c_void,
    size: usize,
) {
    with_profiler_and_call_site(|profiler, thread_data, timestamp, compressed_stack_trace| {
        // Has the memory block been reallocated in-place?
        if old_ptr == new_ptr {
            profiler.update_allocation(
                thread_data,
                old_ptr as u64,
                size as u64,
                compressed_stack_trace,
                timestamp,
            );
        } else {
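            // The block moved: record the allocation at its new address and forget the old one.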
            profiler.record_allocation(
                thread_data,
                new_ptr as u64,
                size as u64,
                compressed_stack_trace,
                timestamp,
            );
            profiler.forget_allocation(thread_data, old_ptr as u64);
        }
    });
}