use fidl::endpoints::{create_endpoints, ServerEnd};
use fidl::AsHandleRef;
use fidl_fuchsia_memory_heapdump_process as fheapdump_process;
use std::sync::{Mutex, MutexGuard};
use std::time::Duration;
use crate::allocations_table::AllocationsTable;
use crate::resources_table::{ResourceKey, ResourcesTable};
use crate::waiter_list::WaiterList;
use crate::{
heapdump_global_stats as HeapdumpGlobalStats,
heapdump_thread_local_stats as HeapdumpThreadLocalStats,
};
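// How long `record_allocation` waits for a stale entry at the same address to be removed
// before retrying.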
const WAIT_TIMEOUT: Duration = Duration::from_secs(120);
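/// Process-wide instrumentation state: tracks live allocations, interned resources and global
/// statistics, and publishes them to the heapdump registry over the
/// `fuchsia.memory.heapdump.process` FIDL protocols.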
pub struct Profiler {
snapshot_sink: fheapdump_process::SnapshotSinkV1SynchronousProxy,
inner: Mutex<ProfilerInner>,
}
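/// State protected by the `Profiler` mutex.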
#[derive(Default)]
struct ProfilerInner {
allocations_table: AllocationsTable,
resources_table: ResourcesTable,
global_stats: HeapdumpGlobalStats,
snapshot_sink_server: Option<ServerEnd<fheapdump_process::SnapshotSinkV1Marker>>,
waiters: WaiterList,
}
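/// Per-thread state: local allocation statistics plus the cached thread identity, so that the
/// thread's koid and interned name do not need to be recomputed on every allocation.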
#[derive(Default)]
pub struct PerThreadData {
local_stats: HeapdumpThreadLocalStats,
cached_koid: Option<zx::Koid>,
resource_key_and_name: Option<(ResourceKey, zx::Name)>,
}
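// The snapshot sink channel is created eagerly; its server endpoint is parked in
// `ProfilerInner` until `bind` transfers it to the registry.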
impl Default for Profiler {
fn default() -> Profiler {
let (client, server) = create_endpoints();
let proxy = fheapdump_process::SnapshotSinkV1SynchronousProxy::new(client.into_channel());
let inner = ProfilerInner { snapshot_sink_server: Some(server), ..Default::default() };
Profiler { snapshot_sink: proxy, inner: Mutex::new(inner) }
}
}
impl Profiler {
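/// Registers the current process with the heapdump registry reachable over `registry_channel`,
/// handing it a duplicate of the process handle, the two shared VMOs and the snapshot sink
/// server endpoint. Must be called at most once.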
pub fn bind(&self, registry_channel: zx::Channel) {
let process_dup = fuchsia_runtime::process_self()
.duplicate(zx::Rights::SAME_RIGHTS)
.expect("failed to duplicate process handle");
let (snapshot_sink_server, allocations_table_dup, resources_table_dup) = {
let mut inner = self.inner.lock().unwrap();
(
inner.snapshot_sink_server.take(),
inner.allocations_table.share_vmo(),
inner.resources_table.share_vmo(),
)
};
let snapshot_sink_server = snapshot_sink_server.expect("bind called more than once");
let registry_proxy = fheapdump_process::RegistrySynchronousProxy::new(registry_channel);
let _ = registry_proxy.register_v1(
process_dup,
allocations_table_dup,
resources_table_dup,
snapshot_sink_server,
);
}
}
impl Profiler {
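/// Returns a copy of the process-wide allocation statistics.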
pub fn get_global_stats(&self) -> HeapdumpGlobalStats {
self.inner.lock().unwrap().global_stats
}
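/// Interns the current thread's info and the given stack trace, returning the corresponding
/// resource keys along with the lock guard so that the caller can update the tables while
/// still holding the lock.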
fn intern_and_lock(
&self,
thread_data: &mut PerThreadData,
compressed_stack_trace: &[u8],
) -> (MutexGuard<'_, ProfilerInner>, ResourceKey, ResourceKey) {
let (thread_koid, thread_name) =
get_current_thread_koid_and_name(&mut thread_data.cached_koid);
let mut inner = self.inner.lock().unwrap();
let thread_info_key = match thread_data.resource_key_and_name {
Some((resource_key, old_name)) if old_name == thread_name => {
resource_key
}
_ => {
let resource_key =
inner.resources_table.insert_thread_info(thread_koid, &thread_name);
thread_data.resource_key_and_name = Some((resource_key, thread_name));
resource_key
}
};
let stack_trace_key = inner.resources_table.intern_stack_trace(compressed_stack_trace);
(inner, thread_info_key, stack_trace_key)
}
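/// Records a new allocation at `address`. If an entry for the same address is still present
/// (its deallocation has not been recorded yet by another thread), waits up to `WAIT_TIMEOUT`
/// for it to be removed before retrying.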
pub fn record_allocation(
&self,
thread_data: &mut PerThreadData,
address: u64,
size: u64,
compressed_stack_trace: &[u8],
timestamp: zx::MonotonicInstant,
) {
let (mut inner, thread_info_key, stack_trace_key) =
self.intern_and_lock(thread_data, compressed_stack_trace);
while !inner.allocations_table.try_record_allocation(
address,
size,
thread_info_key,
stack_trace_key,
timestamp,
) {
inner = WaiterList::wait(inner, |inner| &mut inner.waiters, address, WAIT_TIMEOUT);
}
inner.global_stats.total_allocated_bytes += size;
thread_data.local_stats.total_allocated_bytes += size;
}
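/// Removes the entry for the allocation at `address`, accounts the freed bytes and wakes up
/// at most one thread waiting to record a new allocation at the same address.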
pub fn forget_allocation(&self, thread_data: &mut PerThreadData, address: u64) {
let mut inner = self.inner.lock().unwrap();
let size = inner.allocations_table.forget_allocation(address);
inner.global_stats.total_deallocated_bytes += size;
thread_data.local_stats.total_deallocated_bytes += size;
inner.waiters.notify_one(address);
}
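/// Updates an existing allocation at `address` with a new size, thread and stack trace, and
/// accounts the size difference as allocated or deallocated bytes.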
pub fn update_allocation(
&self,
thread_data: &mut PerThreadData,
address: u64,
size: u64,
compressed_stack_trace: &[u8],
timestamp: zx::MonotonicInstant,
) {
let (mut inner, thread_info_key, stack_trace_key) =
self.intern_and_lock(thread_data, compressed_stack_trace);
let old_size = inner.allocations_table.update_allocation(
address,
size,
thread_info_key,
stack_trace_key,
timestamp,
);
if size > old_size {
let delta_allocated_bytes = size - old_size;
inner.global_stats.total_allocated_bytes += delta_allocated_bytes;
thread_data.local_stats.total_allocated_bytes += delta_allocated_bytes;
} else {
let delta_deallocated_bytes = old_size - size;
inner.global_stats.total_deallocated_bytes += delta_deallocated_bytes;
thread_data.local_stats.total_deallocated_bytes += delta_deallocated_bytes;
}
}
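/// Takes a snapshot of the allocations table and sends it over the snapshot sink under the
/// given name.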
pub fn publish_named_snapshot(&self, name: &str) {
let allocations_table_snapshot = {
let inner = self.inner.lock().unwrap();
inner.allocations_table.snapshot_vmo()
};
let _ = self.snapshot_sink.store_named_snapshot(name, allocations_table_snapshot);
}
}
impl PerThreadData {
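/// Returns a copy of this thread's local allocation statistics.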
pub fn get_local_stats(&self) -> HeapdumpThreadLocalStats {
self.local_stats
}
}
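// Returns the current thread's koid and name. The koid, which never changes, is cached in
// `koid_cache` to avoid repeating the syscall on every allocation; the name is re-read every
// time because it may change during the thread's lifetime.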
fn get_current_thread_koid_and_name(koid_cache: &mut Option<zx::Koid>) -> (zx::Koid, zx::Name) {
let thread = fuchsia_runtime::thread_self();
let koid = koid_cache
.get_or_insert_with(|| thread.get_koid().expect("failed to get current thread's koid"));
let name = thread.get_name().expect("failed to get current thread's name");
(*koid, name)
}