1// Copyright 2023 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
45use fidl::HandleBased;
6use fuchsia_sync::{Condvar, Mutex};
7use std::cell::UnsafeCell;
8use std::collections::hash_map::Entry;
9use std::collections::{HashMap, VecDeque};
10use std::marker::PhantomData;
11use std::mem::ManuallyDrop;
12use std::ops::Deref;
13use std::sync::{Arc, OnceLock, Weak};
1415#[cfg(not(target_os = "fuchsia"))]
16use fuchsia_async::emulated_handle::zx_handle_t;
17#[cfg(target_os = "fuchsia")]
18use zx::sys::zx_handle_t;
/// A wrapper around zircon handles that allows them to be temporarily cloned. These temporary
/// clones can be used with `unblock` below, which requires callbacks with `'static` lifetime.
/// This is similar to `Arc<T>`, except that whilst there are no clones, there is no memory
/// overhead, and there's no performance overhead to use them just as you would without the
/// wrapper, except for a small overhead when they are dropped. The wrapper ensures that the
/// handle is only dropped when there are no references.
pub struct TempClonable<T: HandleBased>(ManuallyDrop<T>);
2728impl<T: HandleBased> TempClonable<T> {
29/// Returns a new handle that can be temporarily cloned.
30pub fn new(handle: T) -> Self {
31Self(ManuallyDrop::new(handle))
32 }
33}
3435impl<T: HandleBased> Deref for TempClonable<T> {
36type Target = T;
3738fn deref(&self) -> &T {
39&self.0
40}
41}
4243impl<T: HandleBased> TempClonable<T> {
44/// Creates a temporary clone of the handle. The clone should only exist temporarily.
45 ///
46 /// # Panics
47 ///
48 /// Panics if the handle is invalid.
49pub fn temp_clone(&self) -> TempClone<T> {
50assert!(!self.is_invalid_handle());
51let mut clones = clones().lock();
52let raw_handle = self.0.raw_handle();
53 TempClone {
54 handle: match clones.entry(raw_handle) {
55 Entry::Occupied(mut o) => {
56if let Some(clone) = o.get().upgrade() {
57 clone
58 } else {
59// The last strong reference was dropped but the entry hasn't been removed
60 // yet. This must be racing with `TempHandle::drop`. Replace the
61 // `TempHandle`.
62let clone =
63 Arc::new(TempHandle { raw_handle, tombstone: UnsafeCell::new(false) });
64*o.get_mut() = Arc::downgrade(&clone);
65 clone
66 }
67 }
68 Entry::Vacant(v) => {
69let clone =
70 Arc::new(TempHandle { raw_handle, tombstone: UnsafeCell::new(false) });
71 v.insert(Arc::downgrade(&clone));
72 clone
73 }
74 },
75 marker: PhantomData,
76 }
77 }
78}
7980impl<T: HandleBased> Drop for TempClonable<T> {
81fn drop(&mut self) {
82if let Some(handle) = clones().lock().remove(&self.0.raw_handle()).and_then(|c| c.upgrade())
83 {
84// There are still some temporary clones alive, so mark the handle with a tombstone.
8586 // SAFETY: This is the only unsafe place where we access `tombstone`. We're are holding
87 // the clones lock which ensures no other thread is concurrently accessing it, but it
88 // wouldn't normally happen anyway because it would mean there were multiple
89 // TempClonable instances wrapping the same handle, which shouldn't happen.
90unsafe { *handle.tombstone.get() = true };
91return;
92 }
9394// SAFETY: There are no temporary clones, so we can drop the handle now. No more clones can
95 // be made and it should be clear we meet the safety requirements of ManuallyDrop.
96unsafe { ManuallyDrop::drop(&mut self.0) }
97 }
98}
99100type Clones = Mutex<HashMap<zx_handle_t, Weak<TempHandle>>>;
101102/// Returns the global instance which keeps track of temporary clones.
103fn clones() -> &'static Clones {
104static CLONES: OnceLock<Clones> = OnceLock::new();
105 CLONES.get_or_init(|| Mutex::new(HashMap::new()))
106}
/// A temporary clone of a handle, created via [`TempClonable::temp_clone`]. Whilst any
/// `TempClone` is alive, the underlying raw handle stays open even if the primary
/// `TempClonable` is dropped first.
pub struct TempClone<T> {
    // Shared bookkeeping for the raw handle; see `TempHandle`.
    handle: Arc<TempHandle>,
    // Ties this clone to the handle type `T` without storing a `T`.
    marker: PhantomData<T>,
}
112113impl<T> Deref for TempClone<T> {
114type Target = T;
115116fn deref(&self) -> &T {
117// SAFETY: T is repr(transparent) and stores zx_handle_t.
118unsafe { std::mem::transmute::<&zx_handle_t, &T>(&self.handle.raw_handle) }
119 }
120}
/// Shared state for a raw handle that has (or recently had) temporary clones.
struct TempHandle {
    // The raw handle value shared by the primary handle and all of its temporary clones.
    raw_handle: zx_handle_t,
    // Set to true by `TempClonable::drop` when the primary handle is dropped whilst clones
    // are still alive; this transfers responsibility for closing the handle to the last
    // clone (see `TempHandle::drop`).
    tombstone: UnsafeCell<bool>,
}

// SAFETY: `raw_handle` is a plain handle value, and `tombstone` is only written whilst
// holding the clones lock (see `TempClonable::drop`) or accessed via `get_mut` when access
// is exclusive (see `TempHandle::drop`), so sending and sharing between threads is sound.
unsafe impl Send for TempHandle {}
unsafe impl Sync for TempHandle {}
129130impl Drop for TempHandle {
131fn drop(&mut self) {
132if *self.tombstone.get_mut() {
133// SAFETY: The primary handle has been dropped and it is our job to clean up the
134 // handle. There are no memory safety issues here.
135unsafe { fidl::Handle::from_raw(self.raw_handle) };
136 } else {
137if let Entry::Occupied(o) = clones().lock().entry(self.raw_handle) {
138// There's a small window where another TempHandle could have been inserted, so
139 // before removing this entry, check for a match.
140if std::ptr::eq(o.get().as_ptr(), self) {
141 o.remove_entry();
142 }
143 }
144 }
145 }
146}
147148/// This is similar to fuchsia-async's unblock except that it used a fixed size thread pool which
149/// has the advantage of not making traces difficult to decipher because of many threads being
150/// spawned.
151pub async fn unblock<T: 'static + Send>(f: impl FnOnce() -> T + Send + 'static) -> T {
152const NUM_THREADS: u8 = 2;
153154struct State {
155 queue: Mutex<VecDeque<Box<dyn FnOnce() + Send + 'static>>>,
156 cvar: Condvar,
157 }
158159static STATE: OnceLock<State> = OnceLock::new();
160161let mut start_threads = false;
162let state = STATE.get_or_init(|| {
163 start_threads = true;
164 State { queue: Mutex::new(VecDeque::new()), cvar: Condvar::new() }
165 });
166167if start_threads {
168for _ in 0..NUM_THREADS {
169 std::thread::spawn(|| loop {
170let item = {
171let mut queue = state.queue.lock();
172loop {
173if let Some(item) = queue.pop_front() {
174break item;
175 }
176 state.cvar.wait(&mut queue);
177 }
178 };
179 item();
180 });
181 }
182 }
183184let (tx, rx) = futures::channel::oneshot::channel();
185 state.queue.lock().push_back(Box::new(move || {
186let _ = tx.send(f());
187 }));
188 state.cvar.notify_one();
189190 rx.await.unwrap()
191}
#[cfg(target_os = "fuchsia")]
#[cfg(test)]
mod tests {
    use super::{clones, TempClonable};

    use std::sync::Arc;

    /// Checks the lifecycle of temporary clones: a clone keeps the handle readable after the
    /// primary handle is dropped, and everything is cleaned up once the last clone goes away.
    #[test]
    fn test_temp_clone() {
        let parent_vmo = zx::Vmo::create(100).expect("create failed");

        {
            let temp_clone = {
                let vmo = TempClonable::new(
                    parent_vmo
                        .create_child(zx::VmoChildOptions::REFERENCE, 0, 0)
                        .expect("create_child failed"),
                );

                vmo.write(b"foo", 0).expect("write failed");
                {
                    // Create and read from a temporary clone.
                    let temp_clone2 = vmo.temp_clone();
                    assert_eq!(&temp_clone2.read_to_vec(0, 3).expect("read_to_vec failed"), b"foo");
                }

                // We should still be able to read from the primary handle.
                assert_eq!(&vmo.read_to_vec(0, 3).expect("read_to_vec failed"), b"foo");

                // Create another vmo which should get cleaned up when the primary handle is
                // dropped.
                let vmo2 = TempClonable::new(
                    parent_vmo
                        .create_child(zx::VmoChildOptions::REFERENCE, 0, 0)
                        .expect("create_child failed"),
                );
                // Create and immediately drop a temporary clone.
                vmo2.temp_clone();

                // Take another clone that will get dropped after we take the clone below.
                let _clone1 = vmo.temp_clone();

                // And return another clone.
                vmo.temp_clone()
            };

            // The primary handle has been dropped, but we should still be able to
            // read via temp_clone.
            assert_eq!(&temp_clone.read_to_vec(0, 3).expect("read_to_vec failed"), b"foo");
        }

        // Make sure that all the VMOs got properly cleaned up.
        assert_eq!(parent_vmo.info().expect("info failed").num_children, 0);
        assert!(clones().lock().is_empty());
    }

    /// Hammers `temp_clone` from two threads to exercise the race between
    /// `TempHandle::drop` removing a map entry and a new clone being created for the same
    /// raw handle.
    #[test]
    fn test_race() {
        let parent_vmo = zx::Vmo::create(100).expect("create failed");

        {
            let vmo = Arc::new(TempClonable::new(
                parent_vmo
                    .create_child(zx::VmoChildOptions::REFERENCE, 0, 0)
                    .expect("create_child failed"),
            ));
            vmo.write(b"foo", 0).expect("write failed");

            let vmo_clone = vmo.clone();

            let t1 = std::thread::spawn(move || {
                for _ in 0..1000 {
                    assert_eq!(
                        &vmo.temp_clone().read_to_vec(0, 3).expect("read_to_vec failed"),
                        b"foo"
                    );
                }
            });

            let t2 = std::thread::spawn(move || {
                for _ in 0..1000 {
                    assert_eq!(
                        &vmo_clone.temp_clone().read_to_vec(0, 3).expect("read_to_vec failed"),
                        b"foo"
                    );
                }
            });

            // Propagate panics (i.e. failed assertions) from the racing threads. Using
            // `let _ = t.join();` here would silently swallow a panic and let the test pass
            // even when a thread's assertion failed.
            t1.join().unwrap();
            t2.join().unwrap();
        }

        // Make sure that all the VMOs got properly cleaned up.
        assert_eq!(parent_vmo.info().expect("info failed").num_children, 0);
        assert!(clones().lock().is_empty());
    }
}