fidl_fuchsia_memorypressure/
fidl_fuchsia_memorypressure.rs

1// WARNING: This file is machine generated by fidlgen.
2
3#![warn(clippy::all)]
4#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]
5
6use bitflags::bitflags;
7use fidl::client::QueryResponseFut;
8use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
9use fidl::endpoints::{ControlHandle as _, Responder as _};
10pub use fidl_fuchsia_memorypressure_common::*;
11use futures::future::{self, MaybeDone, TryFutureExt};
12use zx_status;
13
/// Request payload for `Provider.RegisterWatcher`.
///
/// Carries the client end of the `Watcher` channel that the provider will use
/// to deliver memory pressure level changes. Holds a channel handle, so the
/// type is move-only (note: `Clone` is intentionally not derived).
#[derive(Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct ProviderRegisterWatcherRequest {
    pub watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
}

// Marker impl: this resource type can be encoded/decoded standalone under the
// default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for ProviderRegisterWatcherRequest
{
}
23
/// Zero-sized protocol marker for `fuchsia.memorypressure/Provider`,
/// tying together the proxy, request-stream, and synchronous-proxy types.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ProviderMarker;

impl fidl::endpoints::ProtocolMarker for ProviderMarker {
    type Proxy = ProviderProxy;
    type RequestStream = ProviderRequestStream;
    // Synchronous proxies are only available when compiled for Fuchsia itself.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = ProviderSynchronousProxy;

    // Fully qualified protocol name, used in error messages and for discovery.
    const DEBUG_NAME: &'static str = "fuchsia.memorypressure.Provider";
}
// Discoverable: this protocol can be connected to by its DEBUG_NAME.
impl fidl::endpoints::DiscoverableProtocolMarker for ProviderMarker {}
36
/// Client-side interface of the `Provider` protocol, implemented by
/// [`ProviderProxy`]. `RegisterWatcher` is a one-way (fire-and-forget) method,
/// so it returns only a transport-level `Result` rather than a future.
pub trait ProviderProxyInterface: Send + Sync {
    fn r#register_watcher(
        &self,
        watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client for `fuchsia.memorypressure/Provider`.
/// Only available when compiled for a Fuchsia target.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct ProviderSynchronousProxy {
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for ProviderSynchronousProxy {
    type Proxy = ProviderProxy;
    type Protocol = ProviderMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    // Consumes the proxy and returns the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
66
#[cfg(target_os = "fuchsia")]
impl ProviderSynchronousProxy {
    /// Wraps `channel` in a synchronous FIDL client for this protocol.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    /// Consumes the proxy and returns the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<ProviderEvent, fidl::Error> {
        ProviderEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of children jobs in its tree.
    pub fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        // One-way call: encode the request and send it using the method's
        // generated wire ordinal; no reply is awaited.
        self.client.send::<ProviderRegisterWatcherRequest>(
            (watcher,),
            0x91e65af25aae4a9,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}
111
/// Asynchronous client for `fuchsia.memorypressure/Provider`.
/// Cloning the proxy shares the same underlying channel.
#[derive(Debug, Clone)]
pub struct ProviderProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl fidl::endpoints::Proxy for ProviderProxy {
    type Protocol = ProviderMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    // Fails (returning the proxy back) if other clones still hold the channel.
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
132
impl ProviderProxy {
    /// Create a new Proxy for fuchsia.memorypressure/Provider.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> ProviderEventStream {
        ProviderEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of children jobs in its tree.
    pub fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        // Delegates to the trait implementation below, which performs the send.
        ProviderProxyInterface::r#register_watcher(self, watcher)
    }
}
169
impl ProviderProxyInterface for ProviderProxy {
    fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        // One-way send with the method's generated wire ordinal; must match the
        // ordinal used by the synchronous proxy and the request stream decoder.
        self.client.send::<ProviderRegisterWatcherRequest>(
            (watcher,),
            0x91e65af25aae4a9,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}
182
/// Stream of events sent by the server end of a `Provider` connection.
/// (This protocol declares no events, so the stream only ever yields
/// `UnknownOrdinal` errors or terminates.)
pub struct ProviderEventStream {
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl std::marker::Unpin for ProviderEventStream {}

impl futures::stream::FusedStream for ProviderEventStream {
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}

impl futures::Stream for ProviderEventStream {
    type Item = Result<ProviderEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // Forward to the inner receiver, then decode each raw buffer into a
        // typed ProviderEvent. `None` signals the channel closed.
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            Some(buf) => std::task::Poll::Ready(Some(ProviderEvent::decode(buf))),
            None => std::task::Poll::Ready(None),
        }
    }
}
211
/// Events for the `Provider` protocol. The protocol declares no events, so
/// this enum is uninhabited and `decode` always fails with `UnknownOrdinal`.
#[derive(Debug)]
pub enum ProviderEvent {}

impl ProviderEvent {
    /// Decodes a message buffer as a [`ProviderEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<ProviderEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events never carry a transaction id.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // No event ordinals exist for this protocol.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
231
/// A Stream of incoming requests for fuchsia.memorypressure/Provider.
pub struct ProviderRequestStream {
    // Shared server state (channel + shutdown flag), also referenced by
    // every ProviderControlHandle handed out from this stream.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    is_terminated: bool,
}

impl std::marker::Unpin for ProviderRequestStream {}

impl futures::stream::FusedStream for ProviderRequestStream {
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}

impl fidl::endpoints::RequestStream for ProviderRequestStream {
    type Protocol = ProviderMarker;
    type ControlHandle = ProviderControlHandle;

    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    // Control handles share the same Arc'd serve state as the stream.
    fn control_handle(&self) -> Self::ControlHandle {
        ProviderControlHandle { inner: self.inner.clone() }
    }

    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
272
impl futures::Stream for ProviderRequestStream {
    type Item = Result<ProviderRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Honor a shutdown requested through a control handle.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled ProviderRequestStream after completion");
        }
        // Decode into thread-local scratch buffers to avoid per-message allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closing the channel ends the stream cleanly.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Provider.RegisterWatcher (one-way, so tx_id must be 0).
                    0x91e65af25aae4a9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            ProviderRegisterWatcherRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<ProviderRegisterWatcherRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = ProviderControlHandle { inner: this.inner.clone() };
                        Ok(ProviderRequest::RegisterWatcher {
                            watcher: req.watcher,

                            control_handle,
                        })
                    }
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
332
/// Registration protocol
///
/// Server-side representation of a single incoming `Provider` method call,
/// yielded by [`ProviderRequestStream`].
#[derive(Debug)]
pub enum ProviderRequest {
    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of children jobs in its tree.
    RegisterWatcher {
        watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
        // One-way method: only a control handle is provided, no responder.
        control_handle: ProviderControlHandle,
    },
}
354
355impl ProviderRequest {
356    #[allow(irrefutable_let_patterns)]
357    pub fn into_register_watcher(
358        self,
359    ) -> Option<(fidl::endpoints::ClientEnd<WatcherMarker>, ProviderControlHandle)> {
360        if let ProviderRequest::RegisterWatcher { watcher, control_handle } = self {
361            Some((watcher, control_handle))
362        } else {
363            None
364        }
365    }
366
367    /// Name of the method defined in FIDL
368    pub fn method_name(&self) -> &'static str {
369        match *self {
370            ProviderRequest::RegisterWatcher { .. } => "register_watcher",
371        }
372    }
373}
374
/// Server-side control handle for a `Provider` connection: allows shutting the
/// channel down (optionally with an epitaph) and observing/signaling its state.
/// Clones share the same underlying connection.
#[derive(Debug, Clone)]
pub struct ProviderControlHandle {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}

impl fidl::endpoints::ControlHandle for ProviderControlHandle {
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    // Shut down and send `status` to the client as an epitaph first.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}

// No events on this protocol, so there are no event-sender methods.
impl ProviderControlHandle {}
407
/// Zero-sized protocol marker for `fuchsia.memorypressure/Watcher`.
/// Not discoverable: watcher channels are passed via `Provider.RegisterWatcher`,
/// hence the "(anonymous)" debug name.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct WatcherMarker;

impl fidl::endpoints::ProtocolMarker for WatcherMarker {
    type Proxy = WatcherProxy;
    type RequestStream = WatcherRequestStream;
    // Synchronous proxies are only available when compiled for Fuchsia itself.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = WatcherSynchronousProxy;

    const DEBUG_NAME: &'static str = "(anonymous) Watcher";
}
419
/// Client-side interface of the `Watcher` protocol, implemented by
/// [`WatcherProxy`]. `OnLevelChanged` is a two-way method, so it returns a
/// future that resolves once the watcher acknowledges the notification.
pub trait WatcherProxyInterface: Send + Sync {
    type OnLevelChangedResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#on_level_changed(&self, level: Level) -> Self::OnLevelChangedResponseFut;
}
/// Synchronous (blocking) client for `fuchsia.memorypressure/Watcher`.
/// Only available when compiled for a Fuchsia target.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct WatcherSynchronousProxy {
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for WatcherSynchronousProxy {
    type Proxy = WatcherProxy;
    type Protocol = WatcherMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    // Consumes the proxy and returns the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
447
#[cfg(target_os = "fuchsia")]
impl WatcherSynchronousProxy {
    /// Wraps `channel` in a synchronous FIDL client for this protocol.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    /// Consumes the proxy and returns the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<WatcherEvent, fidl::Error> {
        WatcherEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
    pub fn r#on_level_changed(
        &self,
        mut level: Level,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<(), fidl::Error> {
        // Two-way call: block until the empty acknowledgement (or the deadline).
        let _response =
            self.client.send_query::<WatcherOnLevelChangedRequest, fidl::encoding::EmptyPayload>(
                (level,),
                0x55d559533407fed9,
                fidl::encoding::DynamicFlags::empty(),
                ___deadline,
            )?;
        Ok(_response)
    }
}
515
/// Asynchronous client for `fuchsia.memorypressure/Watcher`.
/// Cloning the proxy shares the same underlying channel.
#[derive(Debug, Clone)]
pub struct WatcherProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl fidl::endpoints::Proxy for WatcherProxy {
    type Protocol = WatcherMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    // Fails (returning the proxy back) if other clones still hold the channel.
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
536
impl WatcherProxy {
    /// Create a new Proxy for fuchsia.memorypressure/Watcher.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> WatcherEventStream {
        WatcherEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
    pub fn r#on_level_changed(
        &self,
        mut level: Level,
    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
        // Delegates to the trait implementation below, which performs the query.
        WatcherProxyInterface::r#on_level_changed(self, level)
    }
}
592
impl WatcherProxyInterface for WatcherProxy {
    type OnLevelChangedResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#on_level_changed(&self, mut level: Level) -> Self::OnLevelChangedResponseFut {
        // Decodes the (empty) acknowledgement message for OnLevelChanged;
        // the ordinal here must match the one used in the send below.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::EmptyPayload,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x55d559533407fed9,
            >(_buf?)?;
            Ok(_response)
        }
        // Two-way call: send the request and return a future for the reply.
        self.client.send_query_and_decode::<WatcherOnLevelChangedRequest, ()>(
            (level,),
            0x55d559533407fed9,
            fidl::encoding::DynamicFlags::empty(),
            _decode,
        )
    }
}
615
/// Stream of events sent by the server end of a `Watcher` connection.
/// (This protocol declares no events, so the stream only ever yields
/// `UnknownOrdinal` errors or terminates.)
pub struct WatcherEventStream {
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl std::marker::Unpin for WatcherEventStream {}

impl futures::stream::FusedStream for WatcherEventStream {
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}

impl futures::Stream for WatcherEventStream {
    type Item = Result<WatcherEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // Forward to the inner receiver, then decode each raw buffer into a
        // typed WatcherEvent. `None` signals the channel closed.
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            Some(buf) => std::task::Poll::Ready(Some(WatcherEvent::decode(buf))),
            None => std::task::Poll::Ready(None),
        }
    }
}
644
/// Events for the `Watcher` protocol. The protocol declares no events, so
/// this enum is uninhabited and `decode` always fails with `UnknownOrdinal`.
#[derive(Debug)]
pub enum WatcherEvent {}

impl WatcherEvent {
    /// Decodes a message buffer as a [`WatcherEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<WatcherEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events never carry a transaction id.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // No event ordinals exist for this protocol.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
664
/// A Stream of incoming requests for fuchsia.memorypressure/Watcher.
pub struct WatcherRequestStream {
    // Shared server state (channel + shutdown flag), also referenced by
    // every WatcherControlHandle handed out from this stream.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    is_terminated: bool,
}

impl std::marker::Unpin for WatcherRequestStream {}

impl futures::stream::FusedStream for WatcherRequestStream {
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}

impl fidl::endpoints::RequestStream for WatcherRequestStream {
    type Protocol = WatcherMarker;
    type ControlHandle = WatcherControlHandle;

    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    // Control handles share the same Arc'd serve state as the stream.
    fn control_handle(&self) -> Self::ControlHandle {
        WatcherControlHandle { inner: self.inner.clone() }
    }

    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
705
impl futures::Stream for WatcherRequestStream {
    type Item = Result<WatcherRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Honor a shutdown requested through a control handle.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled WatcherRequestStream after completion");
        }
        // Decode into thread-local scratch buffers to avoid per-message allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closing the channel ends the stream cleanly.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Watcher.OnLevelChanged (two-way, so tx_id must be nonzero);
                    // the responder keeps the tx_id so the reply can be correlated.
                    0x55d559533407fed9 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            WatcherOnLevelChangedRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<WatcherOnLevelChangedRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = WatcherControlHandle { inner: this.inner.clone() };
                        Ok(WatcherRequest::OnLevelChanged {
                            level: req.level,

                            responder: WatcherOnLevelChangedResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
768
/// Watcher protocol
/// To be implemented by clients who wish to be notified on memory pressure level changes.
///
/// Server-side representation of a single incoming `Watcher` method call,
/// yielded by [`WatcherRequestStream`].
#[derive(Debug)]
pub enum WatcherRequest {
    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
    // Two-way method: `responder` must be used to send the acknowledgement.
    OnLevelChanged { level: Level, responder: WatcherOnLevelChangedResponder },
}
807
808impl WatcherRequest {
809    #[allow(irrefutable_let_patterns)]
810    pub fn into_on_level_changed(self) -> Option<(Level, WatcherOnLevelChangedResponder)> {
811        if let WatcherRequest::OnLevelChanged { level, responder } = self {
812            Some((level, responder))
813        } else {
814            None
815        }
816    }
817
818    /// Name of the method defined in FIDL
819    pub fn method_name(&self) -> &'static str {
820        match *self {
821            WatcherRequest::OnLevelChanged { .. } => "on_level_changed",
822        }
823    }
824}
825
/// Control handle for a `Watcher` server connection. Clones share the same
/// underlying channel state and may be used to shut the connection down.
#[derive(Debug, Clone)]
pub struct WatcherControlHandle {
    // Shared server-side connection state; all clones refer to the same channel.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
830
// Thin delegations to the shared `ServeInner` / channel; all connection
// lifecycle management lives there.
impl fidl::endpoints::ControlHandle for WatcherControlHandle {
    /// Shuts down the server end of the connection.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    /// Shuts down the connection, sending `status` as an epitaph first.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    /// Reports whether the underlying channel is closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    /// Returns a wait on the channel's closed signal.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    /// Clears/sets signal bits on the peer end of the channel
    /// (Fuchsia-only: requires a real zircon channel).
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
856
857impl WatcherControlHandle {}
858
/// Responder for the `Watcher.OnLevelChanged` two-way method.
///
/// Dropping this without calling `send` shuts the connection down
/// (see the `Drop` impl below); use `drop_without_shutdown` to abandon
/// the transaction while keeping the connection alive.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct WatcherOnLevelChangedResponder {
    // Wrapped in ManuallyDrop so the handle can be dropped exactly once on
    // whichever path consumes the responder (send / drop_without_shutdown /
    // Drop), without double-dropping.
    control_handle: std::mem::ManuallyDrop<WatcherControlHandle>,
    // Transaction id copied from the request header; echoed in the reply.
    tx_id: u32,
}
865
/// Sets the channel to be shutdown (see [`WatcherControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for WatcherOnLevelChangedResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: the ManuallyDrop'd handle is dropped exactly once here and
        // never accessed again (the responder itself is being destroyed).
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
876
impl fidl::endpoints::Responder for WatcherOnLevelChangedResponder {
    type ControlHandle = WatcherControlHandle;

    /// Borrows the control handle for this responder's connection.
    fn control_handle(&self) -> &WatcherControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without the channel shutdown that a plain drop
    /// would trigger (see the `Drop` impl above).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
891
892impl WatcherOnLevelChangedResponder {
893    /// Sends a response to the FIDL transaction.
894    ///
895    /// Sets the channel to shutdown if an error occurs.
896    pub fn send(self) -> Result<(), fidl::Error> {
897        let _result = self.send_raw();
898        if _result.is_err() {
899            self.control_handle.shutdown();
900        }
901        self.drop_without_shutdown();
902        _result
903    }
904
905    /// Similar to "send" but does not shutdown the channel if an error occurs.
906    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
907        let _result = self.send_raw();
908        self.drop_without_shutdown();
909        _result
910    }
911
912    fn send_raw(&self) -> Result<(), fidl::Error> {
913        self.control_handle.inner.send::<fidl::encoding::EmptyPayload>(
914            (),
915            self.tx_id,
916            0x55d559533407fed9,
917            fidl::encoding::DynamicFlags::empty(),
918        )
919    }
920}
921
// Wire-format encode/decode support for this library's resource types.
mod internal {
    use super::*;

    // `ProviderRegisterWatcherRequest` carries a handle (the watcher client
    // end), making it a resource type: it is encoded through a mutable
    // borrow so the encoder can take the handle out of the struct.
    impl fidl::encoding::ResourceTypeMarker for ProviderRegisterWatcherRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }

    unsafe impl fidl::encoding::TypeMarker for ProviderRegisterWatcherRequest {
        type Owned = Self;

        // The struct's single field is a channel endpoint occupying 4 bytes
        // inline, so both alignment and inline size are 4 (no padding).
        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            4
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            4
        }
    }

    // Encoding an owned struct forwards to the tuple-of-fields impl below.
    unsafe impl
        fidl::encoding::Encode<
            ProviderRegisterWatcherRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut ProviderRegisterWatcherRequest
    {
        #[inline]
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            _depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<ProviderRegisterWatcherRequest>(offset);
            // Delegate to tuple encoding; `take_or_borrow` hands the `watcher`
            // endpoint over to the encoder.
            fidl::encoding::Encode::<ProviderRegisterWatcherRequest, fidl::encoding::DefaultFuchsiaResourceDialect>::encode(
                (
                    <fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow(&mut self.watcher),
                ),
                encoder, offset, _depth
            )
        }
    }
    unsafe impl<
            T0: fidl::encoding::Encode<
                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
        >
        fidl::encoding::Encode<
            ProviderRegisterWatcherRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for (T0,)
    {
        #[inline]
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<ProviderRegisterWatcherRequest>(offset);
            // No padding regions to zero for this struct: its single 4-byte
            // field fills the inline size exactly (see `inline_size` above).
            // Write the fields.
            self.0.encode(encoder, offset + 0, depth)?;
            Ok(())
        }
    }

    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for ProviderRegisterWatcherRequest
    {
        // Placeholder value that `decode` fills in; the endpoint starts as an
        // empty/invalid handle slot.
        #[inline(always)]
        fn new_empty() -> Self {
            Self {
                watcher: fidl::new_empty!(
                    fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                ),
            }
        }

        #[inline]
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            _depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // No padding bytes to verify for this struct (single 4-byte
            // field); decode the endpoint in place at offset 0.
            fidl::decode!(
                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                &mut self.watcher,
                decoder,
                offset + 0,
                _depth
            )?;
            Ok(())
        }
    }
}