// fuchsia_async/runtime/fuchsia/executor/atomic_future.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5pub mod hooks;
6pub mod spawnable_future;
7
8use crate::ScopeHandle;
9use futures::ready;
10use std::future::Future;
11use std::hash::{Hash, Hasher};
12use std::marker::PhantomData;
13use std::mem::ManuallyDrop;
14use std::ops::Deref;
15use std::pin::Pin;
16use std::ptr::NonNull;
17use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
18use std::sync::atomic::{AtomicUsize, Ordering};
19use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
20
/// A lock-free thread-safe future.
//
// The debugger knows the layout so that async backtraces work, so if this changes the debugger
// might need to be changed too.
//
// This is `repr(C)` so that we can cast between `NonNull<Meta>` and `NonNull<AtomicFuture<F>>`.
//
// LINT.IfChange
#[repr(C)]
struct AtomicFuture<F: Future> {
    // Type-erased metadata (vtable, state/ref-count word, scope). Must be the *first* field:
    // the `repr(C)` layout is what makes the `NonNull<Meta>` <-> `NonNull<AtomicFuture<F>>`
    // casts in the vtable functions valid.
    meta: Meta,

    // `future` is safe to access after successfully clearing the INACTIVE state bit and the `DONE`
    // state bit isn't set.
    future: FutureOrResult<F>,
}
// LINT.ThenChange(//src/developer/debug/zxdb/console/commands/verb_async_backtrace.cc)

/// A lock-free thread-safe future. The handles can be cloned.
///
/// The `'a` lifetime matches the lifetime bound placed on the stored future and its output
/// (see `new` / `new_local`), so a handle cannot outlive data the future borrows.
#[derive(Debug)]
pub struct AtomicFutureHandle<'a>(NonNull<Meta>, PhantomData<&'a ()>);

/// `AtomicFutureHandle` is safe to access from multiple threads at once.
// SAFETY: All access to the underlying `AtomicFuture` is mediated by the atomic `state` word
// (exclusive access is acquired by clearing INACTIVE), and the `Send` bounds on `new` /
// `new_local` ensure the stored future and its output may cross threads.
unsafe impl Sync for AtomicFutureHandle<'_> {}
unsafe impl Send for AtomicFutureHandle<'_> {}

impl Drop for AtomicFutureHandle<'_> {
    fn drop(&mut self) {
        // Dropping a handle releases one strong reference; the `AtomicFuture` itself is only
        // destroyed by `Meta::release` when the count reaches zero.
        self.meta().release();
    }
}

impl Clone for AtomicFutureHandle<'_> {
    fn clone(&self) -> Self {
        // Bump the reference count *before* minting the new handle so the count never
        // under-represents the number of live handles.
        self.meta().retain();
        Self(self.0, PhantomData)
    }
}

60impl PartialEq for AtomicFutureHandle<'_> {
61 fn eq(&self, other: &Self) -> bool {
62 self.0 == other.0
63 }
64}
65
// Pointer identity is reflexive, so the `PartialEq` impl upgrades to full `Eq`.
impl Eq for AtomicFutureHandle<'_> {}

impl Hash for AtomicFutureHandle<'_> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash the metadata pointer, consistent with the pointer-identity `PartialEq`.
        self.0.hash(state);
    }
}

/// Type-erased, shared metadata for a task: the function table for the concrete future type,
/// the combined state-bits + reference-count word, and the scope the task belongs to.
struct Meta {
    // Dispatch table for the concrete `AtomicFuture<F>` this `Meta` is embedded in.
    vtable: &'static VTable,

    // Holds the reference count and state bits (INACTIVE, READY, etc.).
    state: AtomicUsize,

    // NOTE(review): `Meta::scope()` unwraps this, so it must be `Some` whenever the task can be
    // woken or polled — confirm under what circumstances `None` is legal.
    scope: Option<ScopeHandle>,
}

impl Meta {
    // Wakes the task: sets READY and, if the task was idle (INACTIVE set, READY and DONE both
    // clear), pushes it onto the executor's run queue.
    //
    // # Safety
    //
    // This mints a handle with the 'static lifetime, so this should only be called from
    // `AtomicFutureHandle<'static>`.
    unsafe fn wake(&self) {
        if self.state.fetch_or(READY, Relaxed) & (INACTIVE | READY | DONE) == INACTIVE {
            // The handle placed on the run queue owns a reference count.
            self.retain();
            self.scope().executor().task_is_ready(AtomicFutureHandle(self.into(), PhantomData));
        }
    }

    // Wakes the task and marks it as holding an active guard.
    //
    // Returns true if a guard should be acquired.
    //
    // # Safety
    //
    // This mints a handle with the 'static lifetime, so this should only be called from
    // `AtomicFutureHandle<'static>`.
    unsafe fn wake_with_active_guard(&self) -> bool {
        let old = self.state.fetch_or(READY | WITH_ACTIVE_GUARD, Relaxed);
        if old & (INACTIVE | READY | DONE) == INACTIVE {
            // The handle placed on the run queue owns a reference count.
            self.retain();
            self.scope().executor().task_is_ready(AtomicFutureHandle(self.into(), PhantomData));
        }

        // If the task is DONE, the guard won't be released, so we must let the caller know.
        // Likewise, if WITH_ACTIVE_GUARD was already set, a guard is already pending and a
        // second one must not be acquired.
        old & (DONE | WITH_ACTIVE_GUARD) == 0
    }

    // Returns the task's scope. Panics if the task has no scope.
    fn scope(&self) -> &ScopeHandle {
        self.scope.as_ref().unwrap()
    }

    // Increments the reference count held in the low bits of `state`.
    fn retain(&self) {
        let old = self.state.fetch_add(1, Relaxed) & REF_COUNT_MASK;
        // Guard against the count overflowing into the state bits.
        assert!(old != REF_COUNT_MASK);
    }

    // Decrements the reference count, destroying the whole `AtomicFuture` when it hits zero.
    fn release(&self) {
        // This can be Relaxed because there is a barrier in the drop function.
        let old = self.state.fetch_sub(1, Relaxed) & REF_COUNT_MASK;
        if old == 1 {
            // SAFETY: This is safe because we just released the last reference.
            unsafe {
                (self.vtable.drop)(self.into());
            }
        } else {
            // Check for underflow.
            assert!(old > 0);
        }
    }

    // Drops the stored result at most once; whichever thread wins the race to set RESULT_TAKEN
    // performs the drop.
    //
    // # Safety
    //
    // The caller must know that the future has completed.
    unsafe fn drop_result(&self, ordering: Ordering) {
        // It's possible for this to race with another thread so we only drop the result if we are
        // successful in setting the RESULT_TAKEN bit.
        if self.state.fetch_or(RESULT_TAKEN, ordering) & RESULT_TAKEN == 0 {
            unsafe { (self.vtable.drop_result)(self.into()) };
        }
    }
}

/// Per-concrete-future-type function table. `Meta` is type-erased; these functions recover the
/// enclosing `AtomicFuture<F>` (via the `repr(C)` cast) to operate on the stored future/result.
struct VTable {
    /// Drops the atomic future.
    ///
    /// # Safety
    ///
    /// The caller must ensure there are no other references i.e. the reference count should be
    /// zero.
    // zxdb uses this method to figure out the concrete type of the future.
    // LINT.IfChange
    drop: unsafe fn(NonNull<Meta>),
    // LINT.ThenChange(//src/developer/debug/zxdb/console/commands/verb_async_backtrace.cc)
    /// Drops the future.
    ///
    /// # Safety
    ///
    /// The caller must ensure the future hasn't been dropped.
    drop_future: unsafe fn(NonNull<Meta>),
    /// Polls the future.
    ///
    /// # Safety
    ///
    /// The caller must ensure the future hasn't been dropped and has exclusive access.
    poll: unsafe fn(NonNull<Meta>, cx: &mut Context<'_>) -> Poll<()>,

    /// Gets the result.
    ///
    /// # Safety
    ///
    /// The caller must ensure the future is finished and the result hasn't been taken or dropped.
    get_result: unsafe fn(NonNull<Meta>) -> *const (),

    /// Drops the result.
    ///
    /// # Safety
    ///
    /// The caller must ensure the future is finished and the result hasn't already been taken or
    /// dropped.
    drop_result: unsafe fn(NonNull<Meta>),
}

// Storage that holds the future until it completes, then the future's output. Which variant is
// live is tracked externally by the DONE / RESULT_TAKEN state bits, hence `ManuallyDrop` on both.
union FutureOrResult<F: Future> {
    future: ManuallyDrop<F>,
    result: ManuallyDrop<F::Output>,
}

impl<F: Future> AtomicFuture<F> {
    // The concrete `VTable` instance for this future type; installed in `Meta::vtable` by
    // `AtomicFutureHandle::new_local`.
    const VTABLE: VTable = VTable {
        drop: Self::drop,
        drop_future: Self::drop_future,
        poll: Self::poll,
        get_result: Self::get_result,
        drop_result: Self::drop_result,
    };

    // See `VTable::drop` for the safety contract (no other references may exist).
    unsafe fn drop(meta: NonNull<Meta>) {
        // SAFETY: `meta` is the first field of a `repr(C)` `AtomicFuture<F>` that was allocated
        // with `Box::new` in `new_local`, so the cast recovers the original box.
        drop(unsafe { Box::from_raw(meta.cast::<Self>().as_mut()) });
    }

    // See `VTable::poll` for the safety contract (future alive, exclusive access).
    unsafe fn poll(meta: NonNull<Meta>, cx: &mut Context<'_>) -> Poll<()> {
        let future = &mut unsafe { meta.cast::<Self>().as_mut() }.future;
        let result = ready!(unsafe { Pin::new_unchecked(&mut *future.future) }.poll(cx));
        // On completion, replace the future with its result in the union.
        // This might panic which will leave ourselves in a bad state. We deal with this by
        // aborting (see below).
        unsafe { ManuallyDrop::drop(&mut future.future) };
        future.result = ManuallyDrop::new(result);
        Poll::Ready(())
    }

    // See `VTable::drop_future` for the safety contract.
    unsafe fn drop_future(meta: NonNull<Meta>) {
        unsafe { ManuallyDrop::drop(&mut meta.cast::<Self>().as_mut().future.future) };
    }

    // See `VTable::get_result` for the safety contract. The returned pointer is type-erased;
    // callers cast it back to `*const F::Output`.
    unsafe fn get_result(meta: NonNull<Meta>) -> *const () {
        unsafe { &*meta.cast::<Self>().as_mut().future.result as *const F::Output as *const () }
    }

    // See `VTable::drop_result` for the safety contract.
    unsafe fn drop_result(meta: NonNull<Meta>) {
        unsafe { ManuallyDrop::drop(&mut meta.cast::<Self>().as_mut().future.result) };
    }
}

/// State Bits
//
// The top bits of `Meta::state` are flags; the remaining low bits hold the reference count.
//
// Exclusive access is gained by clearing this bit.
const INACTIVE: usize = 1 << 63;

// Set to indicate the future needs to be polled again.
const READY: usize = 1 << 62;

// Terminal state: the future is dropped upon entry to this state. When in this state, other bits
// can be set, including READY (which has no meaning).
const DONE: usize = 1 << 61;

// The task has been detached: nothing will take the result, so it can be dropped eagerly.
const DETACHED: usize = 1 << 60;

// The task has been aborted (see `abort` / `abort_and_detach`).
const ABORTED: usize = 1 << 59;

// The task has an active guard that should be dropped when the task is next polled.
const WITH_ACTIVE_GUARD: usize = 1 << 58;

// The result has been taken (or dropped); it must not be accessed again.
const RESULT_TAKEN: usize = 1 << 57;

// The task is low priority.
const LOW_PRIORITY: usize = 1 << 56;

// The mask for the ref count: everything below the lowest flag bit.
const REF_COUNT_MASK: usize = LOW_PRIORITY - 1;

/// The result of a call to `try_poll`.
/// This indicates the result of attempting to `poll` the future.
pub enum AttemptPollResult {
    /// The future was polled, but did not complete.
    Pending,
    /// The future was polled and finished by this thread.
    /// This result is normally used to trigger garbage-collection of the future.
    IFinished,
    /// The future was already completed by another thread.
    SomeoneElseFinished,
    /// The future was polled and did not complete, but it was woken whilst it was being polled,
    /// so it should be polled again.
    Yield,
    /// The future was aborted.
    Aborted,
}

/// The result of calling the `abort_and_detach` function.
#[must_use]
pub enum AbortAndDetachResult {
    /// The future has finished; it can be dropped.
    Done,

    /// The future needs to be added to a run queue to be aborted.
    AddToRunQueue,

    /// The future is soon to be aborted and nothing needs to be done.
    Pending,
}

impl<'a> AtomicFutureHandle<'a> {
    /// Create a new `AtomicFuture`.
    pub(crate) fn new<F: Future + Send + 'a>(scope: Option<ScopeHandle>, future: F) -> Self
    where
        F::Output: Send + 'a,
    {
        // SAFETY: This is safe because the future and output are both Send.
        unsafe { Self::new_local(scope, future) }
    }

    /// Create a new `AtomicFuture` from a !Send future.
    ///
    /// # Safety
    ///
    /// The caller must uphold the Send requirements.
    pub(crate) unsafe fn new_local<F: Future + 'a>(scope: Option<ScopeHandle>, future: F) -> Self
    where
        F::Output: 'a,
    {
        Self(
            // SAFETY: `Box::into_raw` never returns null.
            unsafe {
                NonNull::new_unchecked(Box::into_raw(Box::new(AtomicFuture {
                    meta: Meta {
                        vtable: &AtomicFuture::<F>::VTABLE,
                        // The future is inactive and we start with a single reference.
                        state: AtomicUsize::new(1 | INACTIVE),
                        scope,
                    },
                    future: FutureOrResult { future: ManuallyDrop::new(future) },
                })))
            }
            // The cast is valid because `meta` is the first field of the `repr(C)` struct.
            .cast::<Meta>(),
            PhantomData,
        )
    }

    // Borrows the shared metadata.
    fn meta(&self) -> &Meta {
        // SAFETY: This is safe because we hold a reference count.
        unsafe { self.0.as_ref() }
    }

    /// Returns the future's ID.
    ///
    /// The ID is only valid so long as there exists at least one live handle.
    pub fn id(&self) -> usize {
        // We use the address of the metadata as the ID since we know it's a stable heap address.
        // We can't use Pin to guarantee it never moves because the actual pointer to the
        // AtomicFuture is stored as a NonNull<Meta>.
        //
        // See https://github.com/rust-lang/rust/issues/54815 for an upstream feature request that
        // would let us encode this in the types.
        self.meta() as *const Meta as usize
    }

    /// Returns the associated scope.
    pub fn scope(&self) -> &ScopeHandle {
        self.meta().scope()
    }

    /// Attempt to poll the underlying future.
    ///
    /// `try_poll` ensures that the future is polled at least once more
    /// unless it has already finished.
    pub(crate) fn try_poll(&self, cx: &mut Context<'_>) -> AttemptPollResult {
        let meta = self.meta();
        // The loop exits with exclusive polling rights; `has_active_guard` records whether this
        // thread took ownership of a pending active guard when clearing WITH_ACTIVE_GUARD.
        let has_active_guard = loop {
            // Attempt to acquire sole responsibility for polling the future (by clearing the
            // INACTIVE bit) and also clear the READY and WITH_ACTIVE_GUARD bits at the same time.
            // We clear both so that we can track if they are set again whilst we are polling.
            let old = meta.state.fetch_and(!(INACTIVE | READY | WITH_ACTIVE_GUARD), Acquire);
            assert_ne!(old & REF_COUNT_MASK, 0);
            if old & DONE != 0 {
                // If the DONE bit is set, the WITH_ACTIVE_GUARD bit should be ignored; it may or
                // may not be set, but it doesn't reflect whether an active guard is held so even
                // though we just cleared it, we shouldn't release a guard here.
                return AttemptPollResult::SomeoneElseFinished;
            }
            let has_active_guard = old & WITH_ACTIVE_GUARD != 0;
            if old & INACTIVE != 0 {
                // We are now the (only) active worker, proceed to poll...
                if old & ABORTED != 0 {
                    if has_active_guard {
                        meta.scope().release_cancel_guard();
                    }
                    // The future was aborted.
                    // SAFETY: We have exclusive access.
                    unsafe {
                        self.drop_future_unchecked();
                    }
                    return AttemptPollResult::Aborted;
                }
                break has_active_guard;
            }
            // Future was already active; this shouldn't really happen because we shouldn't be
            // polling it from multiple threads at the same time. Still, we handle it by setting
            // the READY bit so that it gets polled again. We do this regardless of whether we
            // cleared the READY bit above.
            let old2 = meta.state.fetch_or(READY | (old & WITH_ACTIVE_GUARD), Relaxed);

            if old2 & DONE != 0 {
                // If `has_active_guard` is true, we are responsible for releasing a guard since it
                // means we cleared the `WITH_ACTIVE_GUARD` bit.
                if has_active_guard {
                    meta.scope().release_cancel_guard();
                }
                return AttemptPollResult::SomeoneElseFinished;
            }

            if has_active_guard && old2 & WITH_ACTIVE_GUARD != 0 {
                // Within the small window, something else gave this task an active guard, so we
                // must return one of them.
                meta.scope().release_cancel_guard();
            }

            // If the future is still active, or the future was already marked as ready, we can
            // just return and it will get polled again.
            if old2 & INACTIVE == 0 || old2 & READY != 0 {
                return AttemptPollResult::Pending;
            }
            // The worker finished, and we marked the future as ready, so we must try again because
            // the future won't be in a run queue.
        };

        // We cannot recover from panics.
        let bomb = Bomb;

        // SAFETY: We have exclusive access because we cleared the INACTIVE state bit.
        let result = unsafe { (meta.vtable.poll)(meta.into(), cx) };

        // Poll returned normally; defuse the abort-on-panic guard.
        std::mem::forget(bomb);

        if has_active_guard {
            meta.scope().release_cancel_guard();
        }

        if let Poll::Ready(()) = result {
            // The future will have been dropped, so we just need to set the state.
            //
            // This needs to be Release ordering because we need to synchronize with another thread
            // that takes or drops the result.
            let old = meta.state.fetch_or(DONE, Release);

            if old & WITH_ACTIVE_GUARD != 0 {
                // Whilst we were polling the task, it was given an active guard. We must return it
                // now.
                meta.scope().release_cancel_guard();
            }

            if old & DETACHED != 0 {
                // If the future is detached, we should eagerly drop the result. This can be
                // Relaxed ordering because the result was written by this thread.

                // SAFETY: The future has completed.
                unsafe {
                    meta.drop_result(Relaxed);
                }
            }
            // No one else will read `future` unless they see `INACTIVE`, which will never
            // happen again.
            AttemptPollResult::IFinished
        } else if meta.state.fetch_or(INACTIVE, Release) & READY == 0 {
            AttemptPollResult::Pending
        } else {
            // The future was marked ready whilst we were polling, so yield.
            AttemptPollResult::Yield
        }
    }

    /// Drops the future without checking its current state.
    ///
    /// # Panics
    ///
    /// This will panic if the future is already marked with `DONE`.
    ///
    /// # Safety
    ///
    /// This doesn't check the current state, so this must only be called if it is known that there
    /// is no concurrent access. This also does *not* include any memory barriers before dropping
    /// the future.
    pub(crate) unsafe fn drop_future_unchecked(&self) {
        // Set the state first in case we panic when we drop.
        let meta = self.meta();
        // RESULT_TAKEN is also set: no result was ever produced, so nothing must try to take or
        // drop one.
        let old = meta.state.fetch_or(DONE | RESULT_TAKEN, Relaxed);
        assert_eq!(old & DONE, 0);
        if old & WITH_ACTIVE_GUARD != 0 {
            meta.scope().release_cancel_guard();
        }
        unsafe { (meta.vtable.drop_future)(meta.into()) };
    }

    /// Drops the future if it is not currently being polled. Returns success if the future was
    /// dropped or was already dropped.
    pub(crate) fn try_drop(&self) -> Result<(), ()> {
        // Clearing INACTIVE claims exclusive access, exactly as `try_poll` does.
        let old = self.meta().state.fetch_and(!INACTIVE, Acquire);
        if old & DONE != 0 {
            Ok(())
        } else if old & INACTIVE != 0 {
            // SAFETY: We have exclusive access.
            unsafe {
                self.drop_future_unchecked();
            }
            Ok(())
        } else {
            // Another thread is mid-poll; the caller must retry or give up.
            Err(())
        }
    }

    /// Aborts the task. Returns true if the task needs to be added to a run queue.
    #[must_use]
    pub(crate) fn abort(&self) -> bool {
        self.meta().state.fetch_or(ABORTED | READY, Relaxed) & (INACTIVE | READY | DONE) == INACTIVE
    }

    /// Marks the task as detached.
    pub(crate) fn detach(&self) {
        let meta = self.meta();
        let old = meta.state.fetch_or(DETACHED, Relaxed);

        if old & (DONE | RESULT_TAKEN) == DONE {
            // If the future is done, we should eagerly drop the result. This needs to be acquire
            // ordering because another thread might have written the result.

            // SAFETY: The future has completed.
            unsafe {
                meta.drop_result(Acquire);
            }
        }
    }

    /// Marks the task as aborted and detached (for when the caller isn't interested in waiting
    /// for the cancellation to be finished). Returns true if the task should be added to a run
    /// queue.
    pub(crate) fn abort_and_detach(&self) -> AbortAndDetachResult {
        let meta = self.meta();
        let old_state = meta.state.fetch_or(ABORTED | DETACHED | READY, Relaxed);
        if old_state & DONE != 0 {
            // If the future is done, we should eagerly drop the result. This needs to be acquire
            // ordering because another thread might have written the result.

            // SAFETY: The future has completed.
            unsafe {
                meta.drop_result(Acquire);
            }

            AbortAndDetachResult::Done
        } else if old_state & (INACTIVE | READY) == INACTIVE {
            AbortAndDetachResult::AddToRunQueue
        } else {
            AbortAndDetachResult::Pending
        }
    }

    /// Returns true if the task is detached.
    pub(crate) fn is_detached(&self) -> bool {
        self.meta().state.load(Relaxed) & DETACHED != 0
    }

    /// Returns true if the task is aborted.
    pub(crate) fn is_aborted(&self) -> bool {
        self.meta().state.load(Relaxed) & ABORTED != 0
    }

    /// Takes the result.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `R` is the correct type.
    pub(crate) unsafe fn take_result<R>(&self) -> Option<R> {
        // This needs to be Acquire ordering to synchronize with the polling thread.
        let meta = self.meta();
        // The cheap Relaxed load filters out the common not-done case; the Acquire RMW both
        // claims the result (RESULT_TAKEN) and synchronizes with the writer.
        if meta.state.load(Relaxed) & (DONE | RESULT_TAKEN) == DONE
            && meta.state.fetch_or(RESULT_TAKEN, Acquire) & RESULT_TAKEN == 0
        {
            // SAFETY: We won the race to set RESULT_TAKEN, so the result is present and ours to
            // move out; the caller guarantees `R` is `F::Output`.
            Some(unsafe { ((meta.vtable.get_result)(meta.into()) as *const R).read() })
        } else {
            None
        }
    }

    /// Marks the task as low priority. Returns the old state.
    pub(crate) fn set_low_priority(&self, v: bool) -> bool {
        let prev = if v {
            self.meta().state.fetch_or(LOW_PRIORITY, Relaxed)
        } else {
            self.meta().state.fetch_and(!LOW_PRIORITY, Relaxed)
        };
        prev & LOW_PRIORITY != 0
    }

    /// Returns true if this is a low priority task.
    pub(crate) fn is_low_priority(&self) -> bool {
        self.meta().state.load(Relaxed) & LOW_PRIORITY != 0
    }
}

impl AtomicFutureHandle<'static> {
    /// Returns a waker for the future.
    ///
    /// The returned waker *borrows* the task (its vtable's drop is a no-op and wake-by-value
    /// degrades to wake-by-ref), so it costs no reference-count traffic; cloning it upgrades to
    /// an owned waker that holds a real reference.
    pub(crate) fn waker(&self) -> BorrowedWaker<'_> {
        static BORROWED_WAKER_VTABLE: RawWakerVTable =
            RawWakerVTable::new(waker_clone, waker_wake_by_ref, waker_wake_by_ref, waker_noop);
        static WAKER_VTABLE: RawWakerVTable =
            RawWakerVTable::new(waker_clone, waker_wake, waker_wake_by_ref, waker_drop);

        // Cloning takes a new reference and returns an *owned* waker (WAKER_VTABLE).
        fn waker_clone(raw_meta: *const ()) -> RawWaker {
            // SAFETY: We did the reverse cast below.
            let meta = unsafe { &*(raw_meta as *const Meta) };
            meta.retain();
            RawWaker::new(raw_meta, &WAKER_VTABLE)
        }

        // Wake by value: the waker's reference is either transferred to the run queue or
        // released here.
        fn waker_wake(raw_meta: *const ()) {
            // SAFETY: We did the reverse cast below.
            let meta = unsafe { &*(raw_meta as *const Meta) };
            if meta.state.fetch_or(READY, Relaxed) & (INACTIVE | READY | DONE) == INACTIVE {
                // This consumes the reference count.
                meta.scope().executor().task_is_ready(AtomicFutureHandle(
                    // SAFETY: We know raw_meta is not null.
                    unsafe { NonNull::new_unchecked(raw_meta as *mut Meta) },
                    PhantomData,
                ));
            } else {
                meta.release();
            }
        }

        fn waker_wake_by_ref(meta: *const ()) {
            // SAFETY: We did the reverse cast below.
            let meta = unsafe { &*(meta as *const Meta) };
            // SAFETY: The lifetime on `AtomicFutureHandle` is 'static.
            unsafe {
                meta.wake();
            }
        }

        // Dropping the borrowed waker is a no-op: it never owned a reference.
        fn waker_noop(_meta: *const ()) {}

        // Dropping an owned waker releases the reference taken in `waker_clone`.
        fn waker_drop(meta: *const ()) {
            // SAFETY: We did the reverse cast below.
            let meta = unsafe { &*(meta as *const Meta) };
            meta.release();
        }

        BorrowedWaker(
            // SAFETY: We meet the contract for RawWaker/RawWakerVtable.
            unsafe {
                Waker::from_raw(RawWaker::new(self.0.as_ptr() as *const (), &BORROWED_WAKER_VTABLE))
            },
            PhantomData,
        )
    }

    /// Wakes the future.
    pub(crate) fn wake(&self) {
        // SAFETY: The lifetime on `AtomicFutureHandle` is 'static.
        unsafe {
            self.meta().wake();
        }
    }

    /// Wakes the future with an active guard. Returns true if successful i.e. a guard needs to be
    /// acquired.
    ///
    /// NOTE: `Scope::release_cancel_guard` can be called *before* this function returns because the
    /// task can be polled on another thread. For this reason, the caller either needs to hold a
    /// lock, or it should preemptively take the guard.
    pub(crate) fn wake_with_active_guard(&self) -> bool {
        // SAFETY: The lifetime on `AtomicFutureHandle` is 'static.
        unsafe { self.meta().wake_with_active_guard() }
    }
}

impl<F: Future> Drop for AtomicFuture<F> {
    // Runs when the last reference is released (`Meta::release` -> `VTable::drop` ->
    // `Box::from_raw`). Cleans up whichever union variant is still live.
    fn drop(&mut self) {
        let meta = &mut self.meta;
        // This needs to be acquire ordering so that we see writes that might have just happened
        // in another thread when the future was polled.
        let state = meta.state.load(Acquire);
        if state & DONE == 0 {
            // SAFETY: The state isn't DONE so we must drop the future.
            unsafe {
                (meta.vtable.drop_future)(meta.into());
            }
        } else if state & RESULT_TAKEN == 0 {
            // SAFETY: The result hasn't been taken so we must drop the result.
            unsafe {
                (meta.vtable.drop_result)(meta.into());
            }
        }
    }
}

/// A `Waker` whose lifetime is tied to the `AtomicFutureHandle` it came from (see
/// `AtomicFutureHandle::waker`); dropping it is a no-op, so it carries no reference count.
pub struct BorrowedWaker<'a>(std::task::Waker, PhantomData<&'a ()>);

// Lets a `BorrowedWaker` be used anywhere a `&Waker` is expected.
impl Deref for BorrowedWaker<'_> {
    type Target = Waker;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

// Drop guard that aborts the process if it is ever dropped, i.e. if a panic unwinds past the
// scope that armed it; defused with `std::mem::forget` on the normal path.
struct Bomb;
impl Drop for Bomb {
    fn drop(&mut self) {
        // A panic mid-poll can leave the `FutureOrResult` union in an indeterminate state
        // (see `AtomicFuture::poll`), so the only safe response is to abort.
        std::process::abort();
    }
}