// Copyright 2023 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
45use fidl::HandleBased;
6use std::cell::UnsafeCell;
7use std::collections::hash_map::Entry;
8use std::collections::{HashMap, VecDeque};
9use std::marker::PhantomData;
10use std::mem::ManuallyDrop;
11use std::ops::Deref;
12use std::sync::{Arc, Condvar, Mutex, OnceLock, Weak};
1314#[cfg(not(target_os = "fuchsia"))]
15use fuchsia_async::emulated_handle::zx_handle_t;
16#[cfg(target_os = "fuchsia")]
17use zx::sys::zx_handle_t;
/// A wrapper around zircon handles that allows them to be temporarily cloned. These temporary
/// clones can be used with `unblock` below which requires callbacks with static lifetime. This is
/// similar to Arc<T>, except that whilst there are no clones, there is no memory overhead, and
/// there's no performance overhead to use them just as you would without the wrapper, except for a
/// small overhead when they are dropped. The wrapper ensures that the handle is only dropped when
/// there are no references.
//
// `ManuallyDrop` lets the `Drop` impl below decide whether to close the handle immediately or
// hand that responsibility to the last outstanding `TempClone` (via the tombstone flag).
// NOTE: the `HandleBased` bound must remain on the struct itself because the `Drop` impl
// carries the same bound, and Rust requires a `Drop` impl's bounds to match the type's.
pub struct TempClonable<T: HandleBased>(ManuallyDrop<T>);
2627impl<T: HandleBased> TempClonable<T> {
28/// Returns a new handle that can be temporarily cloned.
29pub fn new(handle: T) -> Self {
30Self(ManuallyDrop::new(handle))
31 }
32}
3334impl<T: HandleBased> Deref for TempClonable<T> {
35type Target = T;
3637fn deref(&self) -> &T {
38&self.0
39}
40}
4142impl<T: HandleBased> TempClonable<T> {
43/// Creates a temporary clone of the handle. The clone should only exist temporarily.
44 ///
45 /// # Panics
46 ///
47 /// Panics if the handle is invalid.
48pub fn temp_clone(&self) -> TempClone<T> {
49assert!(!self.is_invalid_handle());
50let mut clones = clones().lock().unwrap();
51let raw_handle = self.0.raw_handle();
52 TempClone {
53 handle: match clones.entry(raw_handle) {
54 Entry::Occupied(mut o) => {
55if let Some(clone) = o.get().upgrade() {
56 clone
57 } else {
58// The last strong reference was dropped but the entry hasn't been removed
59 // yet. This must be racing with `TempHandle::drop`. Replace the
60 // `TempHandle`.
61let clone =
62 Arc::new(TempHandle { raw_handle, tombstone: UnsafeCell::new(false) });
63*o.get_mut() = Arc::downgrade(&clone);
64 clone
65 }
66 }
67 Entry::Vacant(v) => {
68let clone =
69 Arc::new(TempHandle { raw_handle, tombstone: UnsafeCell::new(false) });
70 v.insert(Arc::downgrade(&clone));
71 clone
72 }
73 },
74 marker: PhantomData,
75 }
76 }
77}
impl<T: HandleBased> Drop for TempClonable<T> {
    fn drop(&mut self) {
        // Remove this handle's entry from the global map. If a live `TempHandle` is still
        // reachable through it, temporary clones exist and the last of them must close the
        // handle instead of us.
        if let Some(handle) =
            clones().lock().unwrap().remove(&self.0.raw_handle()).and_then(|c| c.upgrade())
        {
            // There are still some temporary clones alive, so mark the handle with a tombstone.

            // SAFETY: This is the only unsafe place where we access `tombstone`. We're holding
            // the clones lock which ensures no other thread is concurrently accessing it, but it
            // wouldn't normally happen anyway because it would mean there were multiple
            // TempClonable instances wrapping the same handle, which shouldn't happen.
            unsafe { *handle.tombstone.get() = true };
            return;
        }

        // SAFETY: There are no temporary clones, so we can drop the handle now. No more clones can
        // be made and it should be clear we meet the safety requirements of ManuallyDrop.
        unsafe { ManuallyDrop::drop(&mut self.0) }
    }
}
99100type Clones = Mutex<HashMap<zx_handle_t, Weak<TempHandle>>>;
101102/// Returns the global instance which keeps track of temporary clones.
103fn clones() -> &'static Clones {
104static CLONES: OnceLock<Clones> = OnceLock::new();
105 CLONES.get_or_init(|| Mutex::new(HashMap::new()))
106}
/// A temporary clone of a handle, created by `TempClonable::temp_clone`. Dereferences to `T`.
pub struct TempClone<T> {
    // Shared ownership of the raw handle's bookkeeping; the last strong reference to the
    // `TempHandle` performs cleanup in its `Drop` impl.
    handle: Arc<TempHandle>,
    // Zero-sized marker tying this clone to the handle type `T` for the `Deref` impl below.
    marker: PhantomData<T>,
}
impl<T> Deref for TempClone<T> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: T is repr(transparent) and stores zx_handle_t.
        // NOTE(review): this relies on every `T` used here being a `repr(transparent)`
        // wrapper around a raw handle; `TempClone`s are only created by `temp_clone`,
        // which restricts `T: HandleBased` — confirm that all such types uphold this.
        unsafe { std::mem::transmute::<&zx_handle_t, &T>(&self.handle.raw_handle) }
    }
}
// Shared state behind all temporary clones of a single raw handle.
struct TempHandle {
    // The raw handle value shared by the primary `TempClonable` and its clones.
    raw_handle: zx_handle_t,
    // Set (whilst holding the clones lock) when the primary handle is dropped while clones
    // remain; tells the last `TempHandle` reference to close the handle in its `Drop`.
    tombstone: UnsafeCell<bool>,
}

// SAFETY: `raw_handle` is plain data, and `tombstone` is only written whilst holding the
// global clones lock (see `TempClonable::drop`) or via `get_mut` with exclusive access
// (see `TempHandle::drop`), so sharing between threads is sound.
unsafe impl Send for TempHandle {}
unsafe impl Sync for TempHandle {}
impl Drop for TempHandle {
    fn drop(&mut self) {
        // `get_mut` needs no unsafe here: `&mut self` proves we have exclusive access.
        if *self.tombstone.get_mut() {
            // SAFETY: The primary handle has been dropped and it is our job to clean up the
            // handle. There are no memory safety issues here.
            // Reconstituting an owned handle and immediately dropping it closes it.
            unsafe { fidl::Handle::from_raw(self.raw_handle) };
        } else {
            // No tombstone: cleanup stays with the primary handle; we just remove our now-dead
            // weak entry from the global map.
            if let Entry::Occupied(o) = clones().lock().unwrap().entry(self.raw_handle) {
                // There's a small window where another TempHandle could have been inserted, so
                // before removing this entry, check for a match.
                if std::ptr::eq(o.get().as_ptr(), self) {
                    o.remove_entry();
                }
            }
        }
    }
}
/// This is similar to fuchsia-async's unblock except that it uses a fixed-size thread pool which
/// has the advantage of not making traces difficult to decipher because of many threads being
/// spawned.
///
/// Runs `f` on one of the pool threads and resolves with its result once it has run.
///
/// NOTE(review): if `f` panics on a pool thread, the oneshot sender is dropped and the
/// `unwrap` below panics on the awaiting task — and the pool permanently loses that thread.
pub async fn unblock<T: 'static + Send>(f: impl FnOnce() -> T + Send + 'static) -> T {
    // Size of the shared pool; every caller of `unblock` shares these threads.
    const NUM_THREADS: u8 = 2;

    // Work queue shared between callers and the pool threads.
    struct State {
        // Pending callbacks, run in FIFO order.
        queue: Mutex<VecDeque<Box<dyn FnOnce() + Send + 'static>>>,
        // Signalled each time an item is pushed onto `queue`.
        cvar: Condvar,
    }

    static STATE: OnceLock<State> = OnceLock::new();

    // The pool threads are spawned exactly once, by whichever caller wins initialization
    // of `STATE` (only the winning closure sets `start_threads`).
    let mut start_threads = false;
    let state = STATE.get_or_init(|| {
        start_threads = true;
        State { queue: Mutex::new(VecDeque::new()), cvar: Condvar::new() }
    });

    if start_threads {
        for _ in 0..NUM_THREADS {
            // The closure only borrows through `state`, a `&'static` reference, which
            // satisfies `thread::spawn`'s `'static` requirement.
            std::thread::spawn(|| loop {
                let item = {
                    let mut queue = state.queue.lock().unwrap();
                    // Standard condvar loop: re-check the queue after every wakeup since
                    // `Condvar::wait` permits spurious wakeups.
                    loop {
                        if let Some(item) = queue.pop_front() {
                            break item;
                        }
                        queue = state.cvar.wait(queue).unwrap();
                    }
                };
                // Run the callback with the queue lock released.
                item();
            });
        }
    }

    // Queue `f`, wake one pool thread, and wait for the result over the oneshot channel.
    let (tx, rx) = futures::channel::oneshot::channel();
    state.queue.lock().unwrap().push_back(Box::new(move || {
        let _ = tx.send(f());
    }));
    state.cvar.notify_one();

    rx.await.unwrap()
}
#[cfg(target_os = "fuchsia")]
#[cfg(test)]
mod tests {
    // These tests exercise real zircon VMO handles, so they only run on Fuchsia.
    use super::{clones, TempClonable};

    use std::sync::Arc;

    // Verifies that temp clones keep the handle usable after the primary is dropped, and
    // that the handle and the global clones map are fully cleaned up afterwards. The drop
    // order of the clones below is deliberate — don't reorder the bindings.
    #[test]
    fn test_temp_clone() {
        let parent_vmo = zx::Vmo::create(100).expect("create failed");

        {
            let temp_clone = {
                let vmo = TempClonable::new(
                    parent_vmo
                        .create_child(zx::VmoChildOptions::REFERENCE, 0, 0)
                        .expect("create_child failed"),
                );

                vmo.write(b"foo", 0).expect("write failed");
                {
                    // Create and read from a temporary clone.
                    let temp_clone2 = vmo.temp_clone();
                    assert_eq!(&temp_clone2.read_to_vec(0, 3).expect("read_to_vec failed"), b"foo");
                }

                // We should still be able to read from the primary handle.
                assert_eq!(&vmo.read_to_vec(0, 3).expect("read_to_vec failed"), b"foo");

                // Create another vmo which should get cleaned up when the primary handle is
                // dropped.
                let vmo2 = TempClonable::new(
                    parent_vmo
                        .create_child(zx::VmoChildOptions::REFERENCE, 0, 0)
                        .expect("create_child failed"),
                );
                // Create and immediately drop a temporary clone.
                vmo2.temp_clone();

                // Take another clone that will get dropped after we take the clone below.
                let _clone1 = vmo.temp_clone();

                // And return another clone.
                vmo.temp_clone()
            };

            // The primary handle has been dropped, but we should still be able to
            // read via temp_clone.
            assert_eq!(&temp_clone.read_to_vec(0, 3).expect("read_to_vec failed"), b"foo");
        }

        // Make sure that all the VMOs got properly cleaned up.
        assert_eq!(parent_vmo.info().expect("info failed").num_children, 0);
        assert!(clones().lock().unwrap().is_empty());
    }

    // Hammers `temp_clone` from two threads to exercise the race between `temp_clone` and
    // `TempHandle::drop` (the occupied-but-dead-entry path), then checks full cleanup.
    #[test]
    fn test_race() {
        let parent_vmo = zx::Vmo::create(100).expect("create failed");

        {
            let vmo = Arc::new(TempClonable::new(
                parent_vmo
                    .create_child(zx::VmoChildOptions::REFERENCE, 0, 0)
                    .expect("create_child failed"),
            ));
            vmo.write(b"foo", 0).expect("write failed");

            let vmo_clone = vmo.clone();

            let t1 = std::thread::spawn(move || {
                for _ in 0..1000 {
                    assert_eq!(
                        &vmo.temp_clone().read_to_vec(0, 3).expect("read_to_vec failed"),
                        b"foo"
                    );
                }
            });

            let t2 = std::thread::spawn(move || {
                for _ in 0..1000 {
                    assert_eq!(
                        &vmo_clone.temp_clone().read_to_vec(0, 3).expect("read_to_vec failed"),
                        b"foo"
                    );
                }
            });

            let _ = t1.join();
            let _ = t2.join();
        }

        // Make sure that all the VMOs got properly cleaned up.
        assert_eq!(parent_vmo.info().expect("info failed").num_children, 0);
        assert!(clones().lock().unwrap().is_empty());
    }
}