_core_rustc_static/
profiler.rs

1// Copyright 2023 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use fidl::endpoints::{create_endpoints, ServerEnd};
6use fidl::AsHandleRef;
7use fidl_fuchsia_memory_heapdump_process as fheapdump_process;
8use std::sync::{Mutex, MutexGuard};
9use std::time::Duration;
10
11use crate::allocations_table::AllocationsTable;
12use crate::resources_table::{ResourceKey, ResourcesTable};
13use crate::waiter_list::WaiterList;
14use crate::{
15    heapdump_global_stats as HeapdumpGlobalStats,
16    heapdump_thread_local_stats as HeapdumpThreadLocalStats,
17};
18
/// A long timeout that will definitely never trigger if everything is working as intended.
///
/// Its only purpose is to prevent tests from hanging indefinitely if something is broken and a
/// waited address is never signaled.
const WAIT_TIMEOUT: Duration = Duration::from_secs(120);
24
/// The global instrumentation state for the current process (singleton).
///
/// This is the root of all the instrumentation's data structures except for per-thread data, which
/// is stored separately.
pub struct Profiler {
    /// Synchronous proxy used to send snapshots (see `publish_named_snapshot`). Its server end
    /// starts out parked in `ProfilerInner` until `bind` hands it over to the registry.
    snapshot_sink: fheapdump_process::SnapshotSinkV1SynchronousProxy,
    /// All mutable profiler state, behind a single mutex.
    inner: Mutex<ProfilerInner>,
}
33
/// Mutable profiler state, protected by the mutex in `Profiler`.
#[derive(Default)]
struct ProfilerInner {
    /// Tracks currently live allocations (address, size and associated metadata).
    allocations_table: AllocationsTable,
    /// Interns thread info and stack traces, addressed by `ResourceKey`.
    resources_table: ResourcesTable,
    /// Process-wide running totals of allocated/deallocated bytes.
    global_stats: HeapdumpGlobalStats,
    /// Server end of the snapshot sink channel; taken (set to `None`) by `bind`.
    snapshot_sink_server: Option<ServerEnd<fheapdump_process::SnapshotSinkV1Marker>>,
    /// Per-address waiters used to serialize racing record/forget operations
    /// (see `record_allocation` and `forget_allocation`).
    waiters: WaiterList,
}
42
/// Per-thread instrumentation data.
#[derive(Default)]
pub struct PerThreadData {
    /// Running totals of bytes allocated/deallocated by this thread.
    local_stats: HeapdumpThreadLocalStats,

    /// Koid of the current thread (cached, as a thread's koid never changes).
    cached_koid: Option<zx::Koid>,

    /// The resource key of the current thread and its name at the time it was generated.
    /// Regenerated by `Profiler::intern_and_lock` whenever the thread name changes.
    resource_key_and_name: Option<(ResourceKey, zx::Name)>,
}
54
55impl Default for Profiler {
56    fn default() -> Profiler {
57        let (client, server) = create_endpoints();
58        let proxy = fheapdump_process::SnapshotSinkV1SynchronousProxy::new(client.into_channel());
59
60        let inner = ProfilerInner { snapshot_sink_server: Some(server), ..Default::default() };
61        Profiler { snapshot_sink: proxy, inner: Mutex::new(inner) }
62    }
63}
64
65impl Profiler {
66    pub fn bind(&self, registry_channel: zx::Channel) {
67        let process_dup = fuchsia_runtime::process_self()
68            .duplicate(zx::Rights::SAME_RIGHTS)
69            .expect("failed to duplicate process handle");
70
71        let (snapshot_sink_server, allocations_table_dup, resources_table_dup) = {
72            let mut inner = self.inner.lock().unwrap();
73            (
74                inner.snapshot_sink_server.take(),
75                inner.allocations_table.share_vmo(),
76                inner.resources_table.share_vmo(),
77            )
78        };
79        let snapshot_sink_server = snapshot_sink_server.expect("bind called more than once");
80
81        let registry_proxy = fheapdump_process::RegistrySynchronousProxy::new(registry_channel);
82
83        // Ignore result.
84        let _ = registry_proxy.register_v1(
85            process_dup,
86            allocations_table_dup,
87            resources_table_dup,
88            snapshot_sink_server,
89        );
90    }
91}
92
impl Profiler {
    /// Returns a copy of the process-wide allocation statistics.
    pub fn get_global_stats(&self) -> HeapdumpGlobalStats {
        self.inner.lock().unwrap().global_stats
    }

    /// Interns the current thread's info and the given compressed stack trace, returning their
    /// resource keys along with the still-held lock on the profiler state.
    ///
    /// Returning the `MutexGuard` lets the caller go on to update the allocations table without
    /// releasing and re-acquiring the lock.
    fn intern_and_lock(
        &self,
        thread_data: &mut PerThreadData,
        compressed_stack_trace: &[u8],
    ) -> (MutexGuard<'_, ProfilerInner>, ResourceKey, ResourceKey) {
        let (thread_koid, thread_name) =
            get_current_thread_koid_and_name(&mut thread_data.cached_koid);

        let mut inner = self.inner.lock().unwrap();
        let thread_info_key = match thread_data.resource_key_and_name {
            Some((resource_key, old_name)) if old_name == thread_name => {
                // The previously generated resource key is still valid, because the name did not
                // change in the meantime.
                resource_key
            }
            _ => {
                // We need to generate a new resource key.
                let resource_key =
                    inner.resources_table.insert_thread_info(thread_koid, &thread_name);
                thread_data.resource_key_and_name = Some((resource_key, thread_name));
                resource_key
            }
        };
        let stack_trace_key = inner.resources_table.intern_stack_trace(compressed_stack_trace);

        (inner, thread_info_key, stack_trace_key)
    }

    /// Records a new allocation of `size` bytes at `address` and updates the global and
    /// per-thread statistics.
    pub fn record_allocation(
        &self,
        thread_data: &mut PerThreadData,
        address: u64,
        size: u64,
        compressed_stack_trace: &[u8],
        timestamp: zx::MonotonicInstant,
    ) {
        let (mut inner, thread_info_key, stack_trace_key) =
            self.intern_and_lock(thread_data, compressed_stack_trace);

        // Insert the new entry. If a duplicate is found, it means that this allocation is recycling
        // a block that was just deallocated by realloc, but for which __scudo_realloc_allocate_hook
        // (i.e. the realloc end notification) has not been executed yet.
        //
        // `WaiterList::wait` consumes the guard and returns a fresh one once `address` is notified
        // (by `forget_allocation`) or WAIT_TIMEOUT expires — hence `inner` is reassigned on each
        // iteration.
        while !inner.allocations_table.try_record_allocation(
            address,
            size,
            thread_info_key,
            stack_trace_key,
            timestamp,
        ) {
            inner = WaiterList::wait(inner, |inner| &mut inner.waiters, address, WAIT_TIMEOUT);
        }

        inner.global_stats.total_allocated_bytes += size;
        thread_data.local_stats.total_allocated_bytes += size;
    }

    /// Removes the allocation at `address`, accounts for its size in the deallocation statistics,
    /// and wakes up a thread possibly blocked on this address in `record_allocation`.
    pub fn forget_allocation(&self, thread_data: &mut PerThreadData, address: u64) {
        let mut inner = self.inner.lock().unwrap();
        let size = inner.allocations_table.forget_allocation(address);

        inner.global_stats.total_deallocated_bytes += size;
        thread_data.local_stats.total_deallocated_bytes += size;

        // Notify the waiter (if any).
        inner.waiters.notify_one(address);
    }

    /// Updates the size and metadata of the existing allocation at `address`, accounting only for
    /// the size delta in the statistics.
    pub fn update_allocation(
        &self,
        thread_data: &mut PerThreadData,
        address: u64,
        size: u64,
        compressed_stack_trace: &[u8],
        timestamp: zx::MonotonicInstant,
    ) {
        let (mut inner, thread_info_key, stack_trace_key) =
            self.intern_and_lock(thread_data, compressed_stack_trace);

        let old_size = inner.allocations_table.update_allocation(
            address,
            size,
            thread_info_key,
            stack_trace_key,
            timestamp,
        );

        // Only the delta is accounted, so that the difference between total_allocated_bytes and
        // total_deallocated_bytes keeps reflecting the currently live bytes.
        if size > old_size {
            let delta_allocated_bytes = size - old_size;
            inner.global_stats.total_allocated_bytes += delta_allocated_bytes;
            thread_data.local_stats.total_allocated_bytes += delta_allocated_bytes;
        } else {
            let delta_deallocated_bytes = old_size - size;
            inner.global_stats.total_deallocated_bytes += delta_deallocated_bytes;
            thread_data.local_stats.total_deallocated_bytes += delta_deallocated_bytes;
        }
    }

    /// Sends a snapshot of the current allocations table over the snapshot sink under `name`.
    pub fn publish_named_snapshot(&self, name: &str) {
        // Take the VMO snapshot under the lock, but send it without holding the lock.
        let allocations_table_snapshot = {
            let inner = self.inner.lock().unwrap();
            inner.allocations_table.snapshot_vmo()
        };

        // Ignore outcome.
        let _ = self.snapshot_sink.store_named_snapshot(name, allocations_table_snapshot);
    }
}
205
impl PerThreadData {
    /// Returns a copy of the statistics accumulated by the current thread.
    pub fn get_local_stats(&self) -> HeapdumpThreadLocalStats {
        self.local_stats
    }
}
211
212fn get_current_thread_koid_and_name(koid_cache: &mut Option<zx::Koid>) -> (zx::Koid, zx::Name) {
213    // Obtain the koid and the name of the current thread. Unlike the koid, the thread name
214    // cannot be cached as it can change between calls.
215    let thread = fuchsia_runtime::thread_self();
216    let koid = koid_cache
217        .get_or_insert_with(|| thread.get_koid().expect("failed to get current thread's koid"));
218    let name = thread.get_name().expect("failed to get current thread's name");
219
220    (*koid, name)
221}