use heapdump_vmo::stack_trace_compression;
use std::ffi::c_void;
use crate::{with_profiler, PerThreadData, Profiler};
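/// Maximum number of frames captured for each stack trace.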
const STACK_TRACE_MAXIMUM_DEPTH: usize = 64;
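/// Size of a buffer large enough to hold the compressed form of any stack
/// trace of up to `STACK_TRACE_MAXIMUM_DEPTH` frames.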
const STACK_TRACE_MAXIMUM_COMPRESSED_SIZE: usize =
stack_trace_compression::max_compressed_size(STACK_TRACE_MAXIMUM_DEPTH);
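// Fast backtrace routine provided by the sanitizer runtime: it fills `buffer`
// with up to `buffer_size` return addresses and returns the number of frames
// actually written.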
extern "C" {
fn __sanitizer_fast_backtrace(buffer: *mut u64, buffer_size: usize) -> usize;
}
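/// Captures the current timestamp and the caller's stack trace, then runs `f`
/// with the profiler and the per-thread data, passing along the timestamp and
/// the compressed stack trace.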
#[inline(always)]
fn with_profiler_and_call_site(
f: impl FnOnce(&Profiler, &mut PerThreadData, zx::MonotonicInstant, &[u8]),
) {
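    // Take the timestamp and the raw backtrace up front, before entering the
    // profiler.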
let timestamp = zx::MonotonicInstant::get();
let mut stack_buf = [0; STACK_TRACE_MAXIMUM_DEPTH];
let stack_len =
unsafe { __sanitizer_fast_backtrace(stack_buf.as_mut_ptr(), STACK_TRACE_MAXIMUM_DEPTH) };
let stack = &stack_buf[..stack_len];
with_profiler(|profiler, thread_data| {
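        // Compress the captured addresses before handing them to `f`.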
let mut compressed_stack_buf = [0; STACK_TRACE_MAXIMUM_COMPRESSED_SIZE];
let compressed_stack_len =
stack_trace_compression::compress_into(stack, &mut compressed_stack_buf);
let compressed_stack = &compressed_stack_buf[..compressed_stack_len];
f(profiler, thread_data, timestamp, compressed_stack)
})
}
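/// Scudo allocation hook: records the new allocation along with its size, the
/// compressed stack trace of the call site and the timestamp.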
#[no_mangle]
pub extern "C" fn __scudo_allocate_hook(ptr: *mut c_void, size: usize) {
with_profiler_and_call_site(|profiler, thread_data, timestamp, compressed_stack_trace| {
profiler.record_allocation(
thread_data,
ptr as u64,
size as u64,
compressed_stack_trace,
timestamp,
);
});
}
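/// Scudo deallocation hook: forgets the allocation record for `ptr`. Null
/// pointers are ignored.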
#[no_mangle]
pub extern "C" fn __scudo_deallocate_hook(ptr: *mut c_void) {
with_profiler(|profiler, thread_data| {
        if !ptr.is_null() {
profiler.forget_allocation(thread_data, ptr as u64);
}
});
}
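/// Scudo hook for the deallocation half of a realloc.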
#[no_mangle]
pub extern "C" fn __scudo_realloc_deallocate_hook(_old_ptr: *mut c_void) {
}
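/// Scudo hook for the allocation half of a realloc: `old_ptr` is the block
/// being resized and `new_ptr` is its new location (which may be unchanged).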
#[no_mangle]
pub extern "C" fn __scudo_realloc_allocate_hook(
old_ptr: *mut c_void,
new_ptr: *mut c_void,
size: usize,
) {
with_profiler_and_call_site(|profiler, thread_data, timestamp, compressed_stack_trace| {
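        // An in-place realloc keeps the same pointer and only needs its record
        // updated; otherwise the block moved, so record the new allocation and
        // forget the old one.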
if old_ptr == new_ptr {
profiler.update_allocation(
thread_data,
old_ptr as u64,
size as u64,
compressed_stack_trace,
timestamp,
);
} else {
profiler.record_allocation(
thread_data,
new_ptr as u64,
size as u64,
compressed_stack_trace,
timestamp,
);
profiler.forget_allocation(thread_data, old_ptr as u64);
}
});
}