Skip to main content

fidl_fuchsia_sysmem2/
fidl_fuchsia_sysmem2.rs

1// WARNING: This file is machine generated by fidlgen.
2
3#![warn(clippy::all)]
4#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]
5
6use bitflags::bitflags;
7use fidl::client::QueryResponseFut;
8use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
9use fidl::endpoints::{ControlHandle as _, Responder as _};
10pub use fidl_fuchsia_sysmem2__common::*;
11use futures::future::{self, MaybeDone, TryFutureExt};
12use zx_status;
13
/// Generated FIDL table type: request payload for
/// `Allocator.AllocateNonSharedCollection` (sent by
/// `r#allocate_non_shared_collection` on the proxies below).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorAllocateNonSharedCollectionRequest {
    /// Server end of the `BufferCollection` channel the allocator should bind.
    pub collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect (no methods; trait bound only).
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateNonSharedCollectionRequest
{
}
25
/// Generated FIDL table type: request payload for
/// `Allocator.AllocateSharedCollection` (sent by
/// `r#allocate_shared_collection` on the proxies below).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorAllocateSharedCollectionRequest {
    /// Server end of the root `BufferCollectionToken` channel to create.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateSharedCollectionRequest
{
}
37
/// Generated FIDL table type: request payload for
/// `Allocator.BindSharedCollection` (sent by `r#bind_shared_collection` on the
/// proxies below).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorBindSharedCollectionRequest {
    /// Client end of the `BufferCollectionToken` being turned in.
    pub token: Option<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
    /// Server end of the `BufferCollection` channel to bind in exchange.
    pub buffer_collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorBindSharedCollectionRequest
{
}
50
/// Generated FIDL table type: request payload for `Allocator.GetVmoInfo`
/// (sent by `r#get_vmo_info` on the proxies below).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorGetVmoInfoRequest {
    /// `vmo` is required to be set; ownership is transferred to the server
    /// so in most cases a client will duplicate a handle and transfer the
    /// duplicate via this field.
    pub vmo: Option<fidl::Vmo>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoRequest
{
}
65
/// Generated FIDL table type: success payload of `Allocator.GetVmoInfo`
/// (see the `r#get_vmo_info` doc comment below for field semantics).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorGetVmoInfoResponse {
    /// ID of the logical buffer collection the queried VMO belongs to.
    pub buffer_collection_id: Option<u64>,
    /// Index of the buffer within that collection.
    pub buffer_index: Option<u64>,
    /// Set iff the queried VMO is a weak sysmem VMO; signals when to close it.
    pub close_weak_asap: Option<fidl::EventPair>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoResponse
{
}
79
/// Generated FIDL table type: request payload for
/// `BufferCollection.AttachLifetimeTracking` (per fidlgen naming conventions;
/// the protocol method itself is outside this chunk).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionAttachLifetimeTrackingRequest {
    /// Eventpair end whose lifetime is tied to the tracked buffers.
    pub server_end: Option<fidl::EventPair>,
    /// Buffer count threshold at which the eventpair end is closed —
    /// TODO(review): confirm against the protocol doc comment.
    pub buffers_remaining: Option<u32>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachLifetimeTrackingRequest
{
}
92
/// Generated FIDL table type: request payload for
/// `BufferCollection.AttachToken` (per fidlgen naming conventions).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionAttachTokenRequest {
    /// Rights to strip from VMO handles delivered via the attached token.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    /// Server end of the `BufferCollectionToken` channel to attach.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachTokenRequest
{
}
105
/// Information about a buffer collection and its buffers.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionInfo {
    /// These settings apply to all the buffers in the initial buffer
    /// allocation.
    ///
    /// This field will always be set by sysmem.
    pub settings: Option<SingleBufferSettings>,
    /// VMO handles (and vmo_usable_start offset) for each buffer in the
    /// collection.
    ///
    /// The size of this vector is the buffer_count (buffer_count is not sent
    /// separately).
    ///
    /// All buffer VMO handles have identical size and access rights.  The size
    /// is in settings.buffer_settings.size_bytes.
    ///
    /// The VMO access rights are determined based on the usages which the
    /// client specified when allocating the buffer collection.  For example, a
    /// client which expressed a read-only usage will receive VMOs without write
    /// rights.  In addition, the rights can be attenuated by the parameter to
    /// BufferCollectionToken.Duplicate() calls.
    ///
    /// This field will always have VmoBuffer(s) in it, even if the participant
    /// specifies usage which does not require VMO handles.  This permits such a
    /// participant to know the vmo_usable_start values, in case that's of any
    /// use to the participant.
    ///
    /// This field will always be set by sysmem, even if the participant doesn't
    /// specify any buffer usage (but the [`fuchsia.sysmem2/VmoBuffer.vmo`]
    /// sub-field within this field won't be set in that case).
    pub buffers: Option<Vec<VmoBuffer>>,
    /// This number is unique among all logical buffer collections per boot.
    ///
    /// This ID number will be the same for all BufferCollectionToken(s),
    /// BufferCollection(s), and BufferCollectionTokenGroup(s) associated with
    /// the same logical buffer collection (derived from the same root token
    /// created with fuchsia.sysmem2.Allocator.CreateSharedCollection, or with
    /// CreateNonSharedCollection).
    ///
    /// The same ID can be retrieved from a BufferCollectionToken,
    /// BufferCollection, or BufferCollectionTokenGroup using
    /// GetBufferCollectionId (at the cost of a round-trip to sysmem and back).
    ///
    /// This field will always be set by sysmem.
    pub buffer_collection_id: Option<u64>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for BufferCollectionInfo {}
157
/// Generated FIDL table type: request payload for
/// `BufferCollection.SetConstraints` (per fidlgen naming conventions).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionSetConstraintsRequest {
    /// The participant's constraints on the buffer collection.
    pub constraints: Option<BufferCollectionConstraints>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionSetConstraintsRequest
{
}
169
/// Generated FIDL table type: request payload for
/// `BufferCollectionToken.CreateBufferCollectionTokenGroup` (per fidlgen
/// naming conventions).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
    /// Server end of the `BufferCollectionTokenGroup` channel to create.
    pub group_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
}
181
/// Generated FIDL table type: request payload for
/// `BufferCollectionToken.Duplicate` (per fidlgen naming conventions).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenDuplicateRequest {
    /// Rights to strip from VMO handles delivered via the duplicated token.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    /// Server end of the new `BufferCollectionToken` channel.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateRequest
{
}
194
/// Generated FIDL table type: request payload for
/// `BufferCollectionTokenGroup.CreateChild` (per fidlgen naming conventions).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenGroupCreateChildRequest {
    /// Must be set.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    /// If not set, the default is `ZX_RIGHT_SAME_RIGHTS`.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildRequest
{
}
209
/// Generated FIDL table type: response payload for
/// `BufferCollectionTokenGroup.CreateChildrenSync` (per fidlgen naming
/// conventions).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenGroupCreateChildrenSyncResponse {
    /// Client ends of the newly created child tokens.
    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildrenSyncResponse
{
}
221
/// Generated FIDL table type: response payload for
/// `BufferCollectionToken.DuplicateSync` (per fidlgen naming conventions).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenDuplicateSyncResponse {
    /// Client ends of the duplicated tokens.
    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateSyncResponse
{
}
233
/// Generated FIDL table type: success payload for
/// `BufferCollection.WaitForAllBuffersAllocated` (per fidlgen naming
/// conventions).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionWaitForAllBuffersAllocatedResponse {
    /// Settings and per-buffer VMOs for the allocated collection.
    pub buffer_collection_info: Option<BufferCollectionInfo>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionWaitForAllBuffersAllocatedResponse
{
}
245
/// Generated FIDL table type: request payload for `Node.AttachNodeTracking`
/// (per fidlgen naming conventions).
#[derive(Debug, Default, PartialEq)]
pub struct NodeAttachNodeTrackingRequest {
    /// This field must be set. This eventpair end will be closed after the
    /// `Node` is closed or failed and the node's buffer counts are no
    /// longer in effect in the logical buffer collection.
    pub server_end: Option<fidl::EventPair>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeAttachNodeTrackingRequest
{
}
260
/// Generated FIDL table type: request payload for `Node.IsAlternateFor`
/// (per fidlgen naming conventions).
#[derive(Debug, Default, PartialEq)]
pub struct NodeIsAlternateForRequest {
    /// Node reference event of the other node being compared against.
    pub node_ref: Option<fidl::Event>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeIsAlternateForRequest {}
269
/// Generated FIDL table type: request payload for `Node.SetWeakOk`
/// (per fidlgen naming conventions; see the `VmoBuffer.close_weak_asap` docs
/// below for how `SetWeakOk` interacts with weak VMOs).
#[derive(Debug, Default, PartialEq)]
pub struct NodeSetWeakOkRequest {
    /// When true, the weak-ok acknowledgement also applies to child nodes.
    pub for_child_nodes_also: Option<bool>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeSetWeakOkRequest {}
278
/// Generated FIDL table type: response payload for `Node.GetNodeRef`
/// (per fidlgen naming conventions).
#[derive(Debug, Default, PartialEq)]
pub struct NodeGetNodeRefResponse {
    /// Event handle identifying the node (usable with `Node.IsAlternateFor` —
    /// TODO(review): confirm against the protocol docs).
    pub node_ref: Option<fidl::Event>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeGetNodeRefResponse {}
287
/// A single buffer (VMO handle plus metadata) within a
/// [`BufferCollectionInfo.buffers`] vector.
#[derive(Debug, Default, PartialEq)]
pub struct VmoBuffer {
    /// `vmo` can be un-set if a participant has only
    /// [`fuchsia.sysmem2/BufferUsage.none`] set to `NONE_USAGE` (explicitly or
    /// implicitly by [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
    /// without `constraints` set).
    pub vmo: Option<fidl::Vmo>,
    /// Offset within the VMO of the first usable byte. Must be < the VMO's size
    /// in bytes, and leave sufficient room for BufferMemorySettings.size_bytes
    /// before the end of the VMO.
    ///
    /// Currently sysmem will always set this field to 0, and in future, sysmem
    /// won't set this field to a non-zero value unless all participants have
    /// explicitly indicated support for non-zero vmo_usable_start (this
    /// mechanism does not exist as of this comment). A participant that hasn't
    /// explicitly indicated support for non-zero vmo_usable_start (all current
    /// clients) should implicitly assume this field is set to 0 without
    /// actually checking this field.
    pub vmo_usable_start: Option<u64>,
    /// This field is set iff `vmo` is a sysmem weak VMO handle.
    ///
    /// If the client sent `SetWeakOk`, the client must keep `close_weak_asap`
    /// around for as long as `vmo`, and must notice `ZX_EVENTPAIR_PEER_CLOSED`.
    /// If that signal occurs, the client must close `vmo` asap.
    ///
    /// If the `vmo` is a sysmem weak VMO handle but the client didn't send
    /// `SetWeakOk`, this means that a holder of a parent node sent `SetWeakOk`
    /// with `for_child_nodes_also` true, and the owner of that parent node is
    /// responsible for paying attention to `close_weak_asap` and informing
    /// child token participants to close handles. In this case the participant
    /// that never sent `SetWeakOk` is allowed to retain and/or pay attention to
    /// `close_weak_asap` (to close the handle faster, or for other reasons such
    /// as diagnosing overall buffer cleanup timing), but is not required to
    /// retain or pay attention to `close_weak_asap`.
    ///
    /// If sysmem closing the sysmem end of `close_weak_asap` does not result in
    /// quick closure of all sysmem weak VMO handles to the buffer, that's
    /// considered a VMO leak, and in that case sysmem will eventually complain
    /// loudly via syslog (currently 5s later).
    pub close_weak_asap: Option<fidl::EventPair>,
    // Hidden fidlgen marker field; present on all generated table types.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl of `fidl::Standalone` for the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {}
333
/// Zero-sized marker type identifying the `fuchsia.sysmem2/Allocator`
/// protocol; its associated proxy/stream types are declared in the
/// `ProtocolMarker` impl below.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct AllocatorMarker;
336
// Wires the marker to its generated proxy/request-stream types and the
// protocol's debug name used in error messages and service discovery.
impl fidl::endpoints::ProtocolMarker for AllocatorMarker {
    type Proxy = AllocatorProxy;
    type RequestStream = AllocatorRequestStream;
    // Synchronous proxies only exist on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = AllocatorSynchronousProxy;

    const DEBUG_NAME: &'static str = "fuchsia.sysmem2.Allocator";
}
// Marks the protocol as discoverable (connectable by name via DEBUG_NAME).
impl fidl::endpoints::DiscoverableProtocolMarker for AllocatorMarker {}
346pub type AllocatorGetVmoInfoResult = Result<AllocatorGetVmoInfoResponse, Error>;
347
/// Abstract client interface for the `fuchsia.sysmem2/Allocator` protocol;
/// implemented by the generated async proxy. One-way methods return
/// `Result<(), fidl::Error>` immediately; two-way methods return an associated
/// `Future` type. See the method doc comments on `AllocatorSynchronousProxy`
/// for full protocol semantics.
pub trait AllocatorProxyInterface: Send + Sync {
    fn r#allocate_non_shared_collection(
        &self,
        payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    fn r#allocate_shared_collection(
        &self,
        payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    fn r#bind_shared_collection(
        &self,
        payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    /// Future resolving to the `ValidateBufferCollectionToken` response.
    type ValidateBufferCollectionTokenResponseFut: std::future::Future<
            Output = Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error>,
        > + Send;
    fn r#validate_buffer_collection_token(
        &self,
        payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> Self::ValidateBufferCollectionTokenResponseFut;
    fn r#set_debug_client_info(
        &self,
        payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    /// Future resolving to the `GetVmoInfo` result (success table or `Error`).
    type GetVmoInfoResponseFut: std::future::Future<Output = Result<AllocatorGetVmoInfoResult, fidl::Error>>
        + Send;
    fn r#get_vmo_info(&self, payload: AllocatorGetVmoInfoRequest) -> Self::GetVmoInfoResponseFut;
}
/// Blocking (synchronous) client proxy for `fuchsia.sysmem2/Allocator`;
/// only available when targeting Fuchsia.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct AllocatorSynchronousProxy {
    // Underlying synchronous FIDL client owning the channel.
    client: fidl::client::sync::Client,
}
381
// Standard `SynchronousProxy` plumbing: channel conversion and the link back
// to the protocol marker and async proxy types.
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for AllocatorSynchronousProxy {
    type Proxy = AllocatorProxy;
    type Protocol = AllocatorMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
399
#[cfg(target_os = "fuchsia")]
impl AllocatorSynchronousProxy {
    /// Creates a synchronous proxy over `channel`, tagging the client with the
    /// protocol's debug name for error reporting.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorEvent, fidl::Error> {
        AllocatorEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes.  This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    pub fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
            &mut payload,
            0x5ca681f025a80e44,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2/BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    pub fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateSharedCollectionRequest>(
            &mut payload,
            0x11a19ff51f0b49c1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`].  The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel.  The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    pub fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorBindSharedCollectionRequest>(
            &mut payload,
            0x550916b0dc1d5b4e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel.  This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    pub fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
        let _response = self.client.send_query::<
            AllocatorValidateBufferCollectionTokenRequest,
            fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
        >(
            payload,
            0x4c5ee91b02a7e68d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
        Ok(_response)
    }

    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorSetDebugClientInfoRequest>(
            payload,
            0x6f68f19a3f509c4d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    pub fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
        let _response = self.client.send_query::<
            AllocatorGetVmoInfoRequest,
            fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
        >(
            &mut payload,
            0x21a881120aa0ddf9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<AllocatorMarker>("get_vmo_info")?;
        Ok(_response.map(|x| x))
    }
}
685
// Allows handing the proxy's channel back as a generic nullable handle.
#[cfg(target_os = "fuchsia")]
impl From<AllocatorSynchronousProxy> for zx::NullableHandle {
    fn from(value: AllocatorSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}
692
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for AllocatorSynchronousProxy {
    /// Wraps a raw channel in a synchronous Allocator proxy.
    fn from(value: fidl::Channel) -> Self {
        AllocatorSynchronousProxy::new(value)
    }
}
699
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for AllocatorSynchronousProxy {
    type Protocol = AllocatorMarker;

    /// Builds a synchronous proxy from a typed client endpoint.
    fn from_client(value: fidl::endpoints::ClientEnd<AllocatorMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
708
/// Asynchronous client proxy for the fuchsia.sysmem2/Allocator protocol.
#[derive(Debug, Clone)]
pub struct AllocatorProxy {
    // Underlying async FIDL client used to encode, send, and receive messages.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
713
714impl fidl::endpoints::Proxy for AllocatorProxy {
715    type Protocol = AllocatorMarker;
716
717    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
718        Self::new(inner)
719    }
720
721    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
722        self.client.into_channel().map_err(|client| Self { client })
723    }
724
725    fn as_channel(&self) -> &::fidl::AsyncChannel {
726        self.client.as_channel()
727    }
728}
729
impl AllocatorProxy {
    /// Create a new Proxy for fuchsia.sysmem2/Allocator.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> AllocatorEventStream {
        AllocatorEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes.  This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    pub fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // Delegates to the AllocatorProxyInterface impl, which does the actual
        // one-way FIDL send.
        AllocatorProxyInterface::r#allocate_non_shared_collection(self, payload)
    }

    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2.BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    pub fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // Delegates to the AllocatorProxyInterface impl (one-way send).
        AllocatorProxyInterface::r#allocate_shared_collection(self, payload)
    }

    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`].  The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel.  The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    pub fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // Delegates to the AllocatorProxyInterface impl (one-way send).
        AllocatorProxyInterface::r#bind_shared_collection(self, payload)
    }

    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel.  This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    pub fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> fidl::client::QueryResponseFut<
        AllocatorValidateBufferCollectionTokenResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegates to the AllocatorProxyInterface impl; returns a future for
        // the two-way response.
        AllocatorProxyInterface::r#validate_buffer_collection_token(self, payload)
    }

    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // Delegates to the AllocatorProxyInterface impl (one-way send).
        AllocatorProxyInterface::r#set_debug_client_info(self, payload)
    }

    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    pub fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
    ) -> fidl::client::QueryResponseFut<
        AllocatorGetVmoInfoResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegates to the AllocatorProxyInterface impl; returns a future for
        // the two-way response.
        AllocatorProxyInterface::r#get_vmo_info(self, payload)
    }
}
979
// Wire-level implementation of the Allocator client methods. The hex literals
// are the FIDL method ordinals; all methods use FLEXIBLE dynamic flags.
impl AllocatorProxyInterface for AllocatorProxy {
    fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // One-way send: encode the request and fire it without awaiting a reply.
        self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
            &mut payload,
            0x5ca681f025a80e44,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // One-way send: encode the request and fire it without awaiting a reply.
        self.client.send::<AllocatorAllocateSharedCollectionRequest>(
            &mut payload,
            0x11a19ff51f0b49c1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // One-way send: encode the request and fire it without awaiting a reply.
        self.client.send::<AllocatorBindSharedCollectionRequest>(
            &mut payload,
            0x550916b0dc1d5b4e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type ValidateBufferCollectionTokenResponseFut = fidl::client::QueryResponseFut<
        AllocatorValidateBufferCollectionTokenResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> Self::ValidateBufferCollectionTokenResponseFut {
        // Decodes the reply body, unwrapping the flexible envelope.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x4c5ee91b02a7e68d,
            >(_buf?)?
            .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            AllocatorValidateBufferCollectionTokenRequest,
            AllocatorValidateBufferCollectionTokenResponse,
        >(
            payload,
            0x4c5ee91b02a7e68d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way send: encode the request and fire it without awaiting a reply.
        self.client.send::<AllocatorSetDebugClientInfoRequest>(
            payload,
            0x6f68f19a3f509c4d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetVmoInfoResponseFut = fidl::client::QueryResponseFut<
        AllocatorGetVmoInfoResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
    ) -> Self::GetVmoInfoResponseFut {
        // Decodes the reply body, unwrapping the flexible result envelope into
        // Result<_, Error>.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x21a881120aa0ddf9,
            >(_buf?)?
            .into_result::<AllocatorMarker>("get_vmo_info")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResult>(
            &mut payload,
            0x21a881120aa0ddf9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
}
1082
/// Stream of events arriving on a fuchsia.sysmem2/Allocator client channel.
pub struct AllocatorEventStream {
    // Receives raw event message buffers from the underlying FIDL client.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl std::marker::Unpin for AllocatorEventStream {}

impl futures::stream::FusedStream for AllocatorEventStream {
    // Terminated exactly when the underlying receiver reports termination.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
1094
1095impl futures::Stream for AllocatorEventStream {
1096    type Item = Result<AllocatorEvent, fidl::Error>;
1097
1098    fn poll_next(
1099        mut self: std::pin::Pin<&mut Self>,
1100        cx: &mut std::task::Context<'_>,
1101    ) -> std::task::Poll<Option<Self::Item>> {
1102        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
1103            &mut self.event_receiver,
1104            cx
1105        )?) {
1106            Some(buf) => std::task::Poll::Ready(Some(AllocatorEvent::decode(buf))),
1107            None => std::task::Poll::Ready(None),
1108        }
1109    }
1110}
1111
/// Events sent by the server on the fuchsia.sysmem2/Allocator protocol.
///
/// The only variant is `_UnknownEvent`: flexible events from a newer server
/// with an ordinal this client doesn't recognize.
#[derive(Debug)]
pub enum AllocatorEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
1120
1121impl AllocatorEvent {
1122    /// Decodes a message buffer as a [`AllocatorEvent`].
1123    fn decode(
1124        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
1125    ) -> Result<AllocatorEvent, fidl::Error> {
1126        let (bytes, _handles) = buf.split_mut();
1127        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
1128        debug_assert_eq!(tx_header.tx_id, 0);
1129        match tx_header.ordinal {
1130            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
1131                Ok(AllocatorEvent::_UnknownEvent { ordinal: tx_header.ordinal })
1132            }
1133            _ => Err(fidl::Error::UnknownOrdinal {
1134                ordinal: tx_header.ordinal,
1135                protocol_name: <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
1136            }),
1137        }
1138    }
1139}
1140
/// A Stream of incoming requests for fuchsia.sysmem2/Allocator.
pub struct AllocatorRequestStream {
    // Shared server-side state: the channel plus its shutdown flag.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel closes or shuts down; polling afterwards panics.
    is_terminated: bool,
}

impl std::marker::Unpin for AllocatorRequestStream {}

impl futures::stream::FusedStream for AllocatorRequestStream {
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
1154
impl fidl::endpoints::RequestStream for AllocatorRequestStream {
    type Protocol = AllocatorMarker;
    type ControlHandle = AllocatorControlHandle;

    // Wrap a server channel in a fresh, non-terminated request stream.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    // Create a control handle sharing this stream's channel state.
    fn control_handle(&self) -> Self::ControlHandle {
        AllocatorControlHandle { inner: self.inner.clone() }
    }

    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
1181
impl futures::Stream for AllocatorRequestStream {
    type Item = Result<AllocatorRequest, fidl::Error>;

    // Reads one message from the channel, decodes its header, and dispatches
    // on the method ordinal to the matching AllocatorRequest variant.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled AllocatorRequestStream after completion");
        }
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        // Peer closed: end the stream rather than surface an error.
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Allocator.AllocateNonSharedCollection (one-way)
                    0x5ca681f025a80e44 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorAllocateNonSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateNonSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::AllocateNonSharedCollection {
                            payload: req,
                            control_handle,
                        })
                    }
                    // Allocator.AllocateSharedCollection (one-way)
                    0x11a19ff51f0b49c1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorAllocateSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::AllocateSharedCollection {
                            payload: req,
                            control_handle,
                        })
                    }
                    // Allocator.BindSharedCollection (one-way)
                    0x550916b0dc1d5b4e => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorBindSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorBindSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::BindSharedCollection { payload: req, control_handle })
                    }
                    // Allocator.ValidateBufferCollectionToken (two-way)
                    0x4c5ee91b02a7e68d => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorValidateBufferCollectionTokenRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorValidateBufferCollectionTokenRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::ValidateBufferCollectionToken {
                            payload: req,
                            responder: AllocatorValidateBufferCollectionTokenResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Allocator.SetDebugClientInfo (one-way)
                    0x6f68f19a3f509c4d => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorSetDebugClientInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::SetDebugClientInfo { payload: req, control_handle })
                    }
                    // Allocator.GetVmoInfo (two-way)
                    0x21a881120aa0ddf9 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorGetVmoInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorGetVmoInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::GetVmoInfo {
                            payload: req,
                            responder: AllocatorGetVmoInfoResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Unknown flexible one-way method: surface it to the server.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(AllocatorRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with a framework
                    // error, then surface it to the server.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(AllocatorRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
1333
/// Allocates system memory buffers.
///
/// Epitaphs are not used in this protocol.
#[derive(Debug)]
pub enum AllocatorRequest {
    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes.  This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    AllocateNonSharedCollection {
        payload: AllocatorAllocateNonSharedCollectionRequest,
        control_handle: AllocatorControlHandle,
    },
    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2.BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    AllocateSharedCollection {
        payload: AllocatorAllocateSharedCollectionRequest,
        control_handle: AllocatorControlHandle,
    },
    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`].  The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel.  The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    BindSharedCollection {
        payload: AllocatorBindSharedCollectionRequest,
        control_handle: AllocatorControlHandle,
    },
    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel.  This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    ValidateBufferCollectionToken {
        payload: AllocatorValidateBufferCollectionTokenRequest,
        responder: AllocatorValidateBufferCollectionTokenResponder,
    },
    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    SetDebugClientInfo {
        payload: AllocatorSetDebugClientInfoRequest,
        control_handle: AllocatorControlHandle,
    },
    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    GetVmoInfo { payload: AllocatorGetVmoInfoRequest, responder: AllocatorGetVmoInfoResponder },
    /// An interaction was received which does not match any known method.
    #[non_exhaustive]
    _UnknownMethod {
        /// Ordinal of the method that was called.
        ordinal: u64,
        control_handle: AllocatorControlHandle,
        method_type: fidl::MethodType,
    },
}
1554
1555impl AllocatorRequest {
1556    #[allow(irrefutable_let_patterns)]
1557    pub fn into_allocate_non_shared_collection(
1558        self,
1559    ) -> Option<(AllocatorAllocateNonSharedCollectionRequest, AllocatorControlHandle)> {
1560        if let AllocatorRequest::AllocateNonSharedCollection { payload, control_handle } = self {
1561            Some((payload, control_handle))
1562        } else {
1563            None
1564        }
1565    }
1566
1567    #[allow(irrefutable_let_patterns)]
1568    pub fn into_allocate_shared_collection(
1569        self,
1570    ) -> Option<(AllocatorAllocateSharedCollectionRequest, AllocatorControlHandle)> {
1571        if let AllocatorRequest::AllocateSharedCollection { payload, control_handle } = self {
1572            Some((payload, control_handle))
1573        } else {
1574            None
1575        }
1576    }
1577
1578    #[allow(irrefutable_let_patterns)]
1579    pub fn into_bind_shared_collection(
1580        self,
1581    ) -> Option<(AllocatorBindSharedCollectionRequest, AllocatorControlHandle)> {
1582        if let AllocatorRequest::BindSharedCollection { payload, control_handle } = self {
1583            Some((payload, control_handle))
1584        } else {
1585            None
1586        }
1587    }
1588
1589    #[allow(irrefutable_let_patterns)]
1590    pub fn into_validate_buffer_collection_token(
1591        self,
1592    ) -> Option<(
1593        AllocatorValidateBufferCollectionTokenRequest,
1594        AllocatorValidateBufferCollectionTokenResponder,
1595    )> {
1596        if let AllocatorRequest::ValidateBufferCollectionToken { payload, responder } = self {
1597            Some((payload, responder))
1598        } else {
1599            None
1600        }
1601    }
1602
1603    #[allow(irrefutable_let_patterns)]
1604    pub fn into_set_debug_client_info(
1605        self,
1606    ) -> Option<(AllocatorSetDebugClientInfoRequest, AllocatorControlHandle)> {
1607        if let AllocatorRequest::SetDebugClientInfo { payload, control_handle } = self {
1608            Some((payload, control_handle))
1609        } else {
1610            None
1611        }
1612    }
1613
1614    #[allow(irrefutable_let_patterns)]
1615    pub fn into_get_vmo_info(
1616        self,
1617    ) -> Option<(AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResponder)> {
1618        if let AllocatorRequest::GetVmoInfo { payload, responder } = self {
1619            Some((payload, responder))
1620        } else {
1621            None
1622        }
1623    }
1624
1625    /// Name of the method defined in FIDL
1626    pub fn method_name(&self) -> &'static str {
1627        match *self {
1628            AllocatorRequest::AllocateNonSharedCollection { .. } => {
1629                "allocate_non_shared_collection"
1630            }
1631            AllocatorRequest::AllocateSharedCollection { .. } => "allocate_shared_collection",
1632            AllocatorRequest::BindSharedCollection { .. } => "bind_shared_collection",
1633            AllocatorRequest::ValidateBufferCollectionToken { .. } => {
1634                "validate_buffer_collection_token"
1635            }
1636            AllocatorRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
1637            AllocatorRequest::GetVmoInfo { .. } => "get_vmo_info",
1638            AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
1639                "unknown one-way method"
1640            }
1641            AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
1642                "unknown two-way method"
1643            }
1644        }
1645    }
1646}
1647
/// Cloneable server-side handle to an `Allocator` channel; used to shut the
/// connection down (with or without an epitaph) and to observe/signal channel
/// state independently of any in-flight responder.
#[derive(Debug, Clone)]
pub struct AllocatorControlHandle {
    // Shared serving state; `Clone` on the handle just clones this `Arc`.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
1652
impl fidl::endpoints::ControlHandle for AllocatorControlHandle {
    /// Shuts down the serving channel (delegates to `ServeInner::shutdown`).
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    /// Shuts down the serving channel after sending `status` as an epitaph
    /// (delegates to `ServeInner::shutdown_with_epitaph`).
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    /// Reports whether the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    /// Returns a signals handle that resolves when the channel closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    #[cfg(target_os = "fuchsia")]
    /// Clears/sets user signals on the peer end of the channel
    /// (zircon-only; requires the `fidl::Peered` trait in scope).
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
1679
// Intentionally empty: no protocol-specific helpers (e.g. event senders) are
// generated for `Allocator` in this inherent impl.
impl AllocatorControlHandle {}
1681
/// Responder for the `Allocator.ValidateBufferCollectionToken` two-way method.
/// Dropping it without replying shuts the channel down (see the `Drop` impl).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct AllocatorValidateBufferCollectionTokenResponder {
    // ManuallyDrop lets `send`/`drop_without_shutdown` release the handle
    // without running this type's `Drop` (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
    // Transaction id of the request; echoed back in the reply header.
    tx_id: u32,
}
1688
/// Set the channel to be shut down (see [`AllocatorControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for AllocatorValidateBufferCollectionTokenResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
1699
impl fidl::endpoints::Responder for AllocatorValidateBufferCollectionTokenResponder {
    type ControlHandle = AllocatorControlHandle;

    /// Borrows the control handle for the channel this responder belongs to.
    fn control_handle(&self) -> &AllocatorControlHandle {
        &self.control_handle
    }

    /// Consumes the responder WITHOUT shutting down the channel: the inner
    /// handle is dropped manually, then `mem::forget(self)` suppresses the
    /// `Drop` impl (which would call `shutdown`).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
1714
impl AllocatorValidateBufferCollectionTokenResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut payload: &AllocatorValidateBufferCollectionTokenResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        // Either way the reply attempt is done; skip Drop's unconditional
        // shutdown (on the error path shutdown was already requested above).
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut payload: &AllocatorValidateBufferCollectionTokenResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the reply message onto the channel.
    fn send_raw(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenResponse,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleType<
            AllocatorValidateBufferCollectionTokenResponse,
        >>(
            fidl::encoding::Flexible::new(payload),
            self.tx_id,
            // Generated wire ordinal for ValidateBufferCollectionToken; must
            // match the client bindings. FLEXIBLE marks the method's strictness.
            0x4c5ee91b02a7e68d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
1755
/// Responder for the `Allocator.GetVmoInfo` two-way method. Dropping it
/// without replying shuts the channel down (see the `Drop` impl).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct AllocatorGetVmoInfoResponder {
    // ManuallyDrop lets `send`/`drop_without_shutdown` release the handle
    // without running this type's `Drop` (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
    // Transaction id of the request; echoed back in the reply header.
    tx_id: u32,
}
1762
/// Set the channel to be shut down (see [`AllocatorControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for AllocatorGetVmoInfoResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
1773
impl fidl::endpoints::Responder for AllocatorGetVmoInfoResponder {
    type ControlHandle = AllocatorControlHandle;

    /// Borrows the control handle for the channel this responder belongs to.
    fn control_handle(&self) -> &AllocatorControlHandle {
        &self.control_handle
    }

    /// Consumes the responder WITHOUT shutting down the channel: the inner
    /// handle is dropped manually, then `mem::forget(self)` suppresses the
    /// `Drop` impl (which would call `shutdown`).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
1788
impl AllocatorGetVmoInfoResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<AllocatorGetVmoInfoResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        // Either way the reply attempt is done; skip Drop's unconditional
        // shutdown (on the error path shutdown was already requested above).
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<AllocatorGetVmoInfoResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the reply message onto the channel.
    fn send_raw(
        &self,
        mut result: Result<AllocatorGetVmoInfoResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            AllocatorGetVmoInfoResponse,
            Error,
        >>(
            // `as_mut().map_err(|e| *e)` encodes the success payload by
            // mutable reference (it may contain handles) while copying the
            // error value out (`*e` requires `Error: Copy`).
            fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
            self.tx_id,
            // Generated wire ordinal for GetVmoInfo; must match the client
            // bindings. FLEXIBLE marks the method's strictness.
            0x21a881120aa0ddf9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
1830
/// Zero-sized [`fidl::endpoints::ProtocolMarker`] for the `BufferCollection`
/// protocol, tying together its proxy, request-stream, and sync-proxy types.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionMarker;
1833
impl fidl::endpoints::ProtocolMarker for BufferCollectionMarker {
    type Proxy = BufferCollectionProxy;
    type RequestStream = BufferCollectionRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionSynchronousProxy;

    // Used in debug/error messages only. NOTE(review): the "(anonymous)"
    // prefix presumably indicates the protocol has no discoverable service
    // name — confirm against the fidlgen conventions if it matters.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollection";
}
/// Result of `BufferCollection.WaitForAllBuffersAllocated`: the allocation
/// response on success, or a sysmem [`Error`] on failure.
pub type BufferCollectionWaitForAllBuffersAllocatedResult =
    Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>;
/// Result of `BufferCollection.CheckAllBuffersAllocated`: success carries no
/// payload; failure is a sysmem [`Error`].
pub type BufferCollectionCheckAllBuffersAllocatedResult = Result<(), Error>;
1845
/// Client-side interface for the `BufferCollection` protocol, implemented by
/// the generated (sync and async) proxies. One-way methods return
/// `Result<(), fidl::Error>` immediately; two-way methods return a
/// `*ResponseFut` future. Methods whose payload types are `Node*` are shared
/// with the other sysmem `Node`-based protocols.
pub trait BufferCollectionProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_constraints(
        &self,
        payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error>;
    type WaitForAllBuffersAllocatedResponseFut: std::future::Future<
            Output = Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error>,
        > + Send;
    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut;
    type CheckAllBuffersAllocatedResponseFut: std::future::Future<
            Output = Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error>,
        > + Send;
    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut;
    fn r#attach_token(
        &self,
        payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error>;
    fn r#attach_lifetime_tracking(
        &self,
        payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error>;
}
/// Blocking (synchronous) client for the `BufferCollection` protocol;
/// available only when targeting Fuchsia.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionSynchronousProxy {
    // Synchronous FIDL client wrapping the underlying channel.
    client: fidl::client::sync::Client,
}
1904
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionSynchronousProxy {
    type Proxy = BufferCollectionProxy;
    type Protocol = BufferCollectionMarker;

    /// Wraps a raw channel in a synchronous proxy (delegates to `Self::new`).
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    /// Consumes the proxy, returning the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
1922
1923#[cfg(target_os = "fuchsia")]
1924impl BufferCollectionSynchronousProxy {
1925    pub fn new(channel: fidl::Channel) -> Self {
1926        let protocol_name = <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
1927        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
1928    }
1929
1930    pub fn into_channel(self) -> fidl::Channel {
1931        self.client.into_channel()
1932    }
1933
1934    /// Waits until an event arrives and returns it. It is safe for other
1935    /// threads to make concurrent requests while waiting for an event.
1936    pub fn wait_for_event(
1937        &self,
1938        deadline: zx::MonotonicInstant,
1939    ) -> Result<BufferCollectionEvent, fidl::Error> {
1940        BufferCollectionEvent::decode(self.client.wait_for_event(deadline)?)
1941    }
1942
1943    /// Ensure that previous messages have been received server side. This is
1944    /// particularly useful after previous messages that created new tokens,
1945    /// because a token must be known to the sysmem server before sending the
1946    /// token to another participant.
1947    ///
1948    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
1949    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
1950    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
1951    /// to mitigate the possibility of a hostile/fake
1952    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
1953    /// Another way is to pass the token to
1954    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
1955    /// the token as part of exchanging it for a
1956    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
1957    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
1958    /// of stalling.
1959    ///
1960    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
1961    /// and then starting and completing a `Sync`, it's then safe to send the
1962    /// `BufferCollectionToken` client ends to other participants knowing the
1963    /// server will recognize the tokens when they're sent by the other
1964    /// participants to sysmem in a
1965    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
1966    /// efficient way to create tokens while avoiding unnecessary round trips.
1967    ///
1968    /// Other options include waiting for each
1969    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
1970    /// individually (using separate call to `Sync` after each), or calling
1971    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
1972    /// converted to a `BufferCollection` via
1973    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
1974    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
1975    /// the sync step and can create multiple tokens at once.
1976    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
1977        let _response = self.client.send_query::<
1978            fidl::encoding::EmptyPayload,
1979            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
1980        >(
1981            (),
1982            0x11ac2555cf575b54,
1983            fidl::encoding::DynamicFlags::FLEXIBLE,
1984            ___deadline,
1985        )?
1986        .into_result::<BufferCollectionMarker>("sync")?;
1987        Ok(_response)
1988    }
1989
1990    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
1991    ///
1992    /// Normally a participant will convert a `BufferCollectionToken` into a
1993    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
1994    /// `Release` via the token (and then close the channel immediately or
1995    /// shortly later in response to server closing the server end), which
1996    /// avoids causing buffer collection failure. Without a prior `Release`,
1997    /// closing the `BufferCollectionToken` client end will cause buffer
1998    /// collection failure.
1999    ///
2000    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
2001    ///
2002    /// By default the server handles unexpected closure of a
2003    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
2004    /// first) by failing the buffer collection. Partly this is to expedite
2005    /// closing VMO handles to reclaim memory when any participant fails. If a
2006    /// participant would like to cleanly close a `BufferCollection` without
2007    /// causing buffer collection failure, the participant can send `Release`
2008    /// before closing the `BufferCollection` client end. The `Release` can
2009    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
2010    /// buffer collection won't require constraints from this node in order to
2011    /// allocate. If after `SetConstraints`, the constraints are retained and
2012    /// aggregated, despite the lack of `BufferCollection` connection at the
2013    /// time of constraints aggregation.
2014    ///
2015    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
2016    ///
2017    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
2018    /// end (without `Release` first) will trigger failure of the buffer
2019    /// collection. To close a `BufferCollectionTokenGroup` channel without
2020    /// failing the buffer collection, ensure that AllChildrenPresent() has been
2021    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
2022    /// client end.
2023    ///
2024    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
2026    /// buffer collection will fail (triggered by reception of `Release` without
2027    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
2028    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
2029    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
2030    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
2031    /// close requires `AllChildrenPresent` (if not already sent), then
2032    /// `Release`, then close client end.
2033    ///
2034    /// If `Release` occurs after `AllChildrenPresent`, the children and all
2035    /// their constraints remain intact (just as they would if the
2036    /// `BufferCollectionTokenGroup` channel had remained open), and the client
2037    /// end close doesn't trigger buffer collection failure.
2038    ///
2039    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
2040    ///
2041    /// For brevity, the per-channel-protocol paragraphs above ignore the
2042    /// separate failure domain created by
2043    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
2044    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
2045    /// unexpectedly closes (without `Release` first) and that client end is
2046    /// under a failure domain, instead of failing the whole buffer collection,
2047    /// the failure domain is failed, but the buffer collection itself is
2048    /// isolated from failure of the failure domain. Such failure domains can be
2049    /// nested, in which case only the inner-most failure domain in which the
2050    /// `Node` resides fails.
2051    pub fn r#release(&self) -> Result<(), fidl::Error> {
2052        self.client.send::<fidl::encoding::EmptyPayload>(
2053            (),
2054            0x6a5cae7d6d6e04c6,
2055            fidl::encoding::DynamicFlags::FLEXIBLE,
2056        )
2057    }
2058
2059    /// Set a name for VMOs in this buffer collection.
2060    ///
2061    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
2062    /// will be truncated to fit. The name of the vmo will be suffixed with the
2063    /// buffer index within the collection (if the suffix fits within
2064    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
2065    /// listed in the inspect data.
2066    ///
2067    /// The name only affects VMOs allocated after the name is set; this call
2068    /// does not rename existing VMOs. If multiple clients set different names
2069    /// then the larger priority value will win. Setting a new name with the
2070    /// same priority as a prior name doesn't change the name.
2071    ///
2072    /// All table fields are currently required.
2073    ///
2074    /// + request `priority` The name is only set if this is the first `SetName`
2075    ///   or if `priority` is greater than any previous `priority` value in
2076    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
2077    /// + request `name` The name for VMOs created under this buffer collection.
2078    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
2079        self.client.send::<NodeSetNameRequest>(
2080            payload,
2081            0xb41f1624f48c1e9,
2082            fidl::encoding::DynamicFlags::FLEXIBLE,
2083        )
2084    }
2085
2086    /// Set information about the current client that can be used by sysmem to
2087    /// help diagnose leaking memory and allocation stalls waiting for a
2088    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
2089    ///
2090    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
2092    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
2093    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
2094    ///
2095    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
2096    /// `Allocator` is the most efficient way to ensure that all
2097    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
2098    /// set, and is also more efficient than separately sending the same debug
2099    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
2100    /// created [`fuchsia.sysmem2/Node`].
2101    ///
2102    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
2103    /// indicate which client is closing their channel first, leading to subtree
2104    /// failure (which can be normal if the purpose of the subtree is over, but
2105    /// if happening earlier than expected, the client-channel-specific name can
2106    /// help diagnose where the failure is first coming from, from sysmem's
2107    /// point of view).
2108    ///
2109    /// All table fields are currently required.
2110    ///
2111    /// + request `name` This can be an arbitrary string, but the current
2112    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
2113    /// + request `id` This can be an arbitrary id, but the current process ID
2114    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
2115    pub fn r#set_debug_client_info(
2116        &self,
2117        mut payload: &NodeSetDebugClientInfoRequest,
2118    ) -> Result<(), fidl::Error> {
2119        self.client.send::<NodeSetDebugClientInfoRequest>(
2120            payload,
2121            0x5cde8914608d99b1,
2122            fidl::encoding::DynamicFlags::FLEXIBLE,
2123        )
2124    }
2125
2126    /// Sysmem logs a warning if sysmem hasn't seen
2127    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
2128    /// within 5 seconds after creation of a new collection.
2129    ///
2130    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
2132    /// take effect.
2133    ///
2134    /// In most cases the default works well.
2135    ///
2136    /// All table fields are currently required.
2137    ///
2138    /// + request `deadline` The time at which sysmem will start trying to log
2139    ///   the warning, unless all constraints are with sysmem by then.
2140    pub fn r#set_debug_timeout_log_deadline(
2141        &self,
2142        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
2143    ) -> Result<(), fidl::Error> {
2144        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
2145            payload,
2146            0x716b0af13d5c0806,
2147            fidl::encoding::DynamicFlags::FLEXIBLE,
2148        )
2149    }
2150
2151    /// This enables verbose logging for the buffer collection.
2152    ///
2153    /// Verbose logging includes constraints set via
2154    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
2155    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
2156    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
2157    /// the tree of `Node`(s).
2158    ///
2159    /// Normally sysmem prints only a single line complaint when aggregation
2160    /// fails, with just the specific detailed reason that aggregation failed,
2161    /// with little surrounding context.  While this is often enough to diagnose
2162    /// a problem if only a small change was made and everything was working
2163    /// before the small change, it's often not particularly helpful for getting
2164    /// a new buffer collection to work for the first time.  Especially with
2165    /// more complex trees of nodes, involving things like
2166    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
2167    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
2168    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
2169    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
2170    /// looks like and why it's failing a logical allocation, or why a tree or
2171    /// subtree is failing sooner than expected.
2172    ///
2173    /// The intent of the extra logging is to be acceptable from a performance
2174    /// point of view, under the assumption that verbose logging is only enabled
2175    /// on a low number of buffer collections. If we're not tracking down a bug,
2176    /// we shouldn't send this message.
2177    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
2178        self.client.send::<fidl::encoding::EmptyPayload>(
2179            (),
2180            0x5209c77415b4dfad,
2181            fidl::encoding::DynamicFlags::FLEXIBLE,
2182        )
2183    }
2184
2185    /// This gets a handle that can be used as a parameter to
2186    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
2187    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
2188    /// client obtained this handle from this `Node`.
2189    ///
2190    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
2191    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
2192    /// despite the two calls typically being on different channels.
2193    ///
2194    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
2195    ///
2196    /// All table fields are currently required.
2197    ///
2198    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
2199    ///   different `Node` channel, to prove that the client obtained the handle
2200    ///   from this `Node`.
2201    pub fn r#get_node_ref(
2202        &self,
2203        ___deadline: zx::MonotonicInstant,
2204    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
2205        let _response = self.client.send_query::<
2206            fidl::encoding::EmptyPayload,
2207            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
2208        >(
2209            (),
2210            0x5b3d0e51614df053,
2211            fidl::encoding::DynamicFlags::FLEXIBLE,
2212            ___deadline,
2213        )?
2214        .into_result::<BufferCollectionMarker>("get_node_ref")?;
2215        Ok(_response)
2216    }
2217
2218    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
2219    /// rooted at a different child token of a common parent
2220    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
2221    /// passed-in `node_ref`.
2222    ///
2223    /// This call is for assisting with admission control de-duplication, and
2224    /// with debugging.
2225    ///
2226    /// The `node_ref` must be obtained using
2227    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
2228    ///
2229    /// The `node_ref` can be a duplicated handle; it's not necessary to call
2230    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
2231    ///
2232    /// If a calling token may not actually be a valid token at all due to a
2233    /// potentially hostile/untrusted provider of the token, call
2234    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
2235    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
2236    /// never responds due to a calling token not being a real token (not really
2237    /// talking to sysmem).  Another option is to call
2238    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
2239    /// which also validates the token along with converting it to a
2240    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
2241    ///
2242    /// All table fields are currently required.
2243    ///
2244    /// - response `is_alternate`
2245    ///   - true: The first parent node in common between the calling node and
2246    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
2247    ///     that the calling `Node` and the `node_ref` `Node` will not have both
2248    ///     their constraints apply - rather sysmem will choose one or the other
2249    ///     of the constraints - never both.  This is because only one child of
2250    ///     a `BufferCollectionTokenGroup` is selected during logical
2251    ///     allocation, with only that one child's subtree contributing to
2252    ///     constraints aggregation.
2253    ///   - false: The first parent node in common between the calling `Node`
2254    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
2255    ///     Currently, this means the first parent node in common is a
2256    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
2257    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
2258    ///     `Node` may have both their constraints apply during constraints
2259    ///     aggregation of the logical allocation, if both `Node`(s) are
2260    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
2261    ///     this case, there is no `BufferCollectionTokenGroup` that will
2262    ///     directly prevent the two `Node`(s) from both being selected and
2263    ///     their constraints both aggregated, but even when false, one or both
2264    ///     `Node`(s) may still be eliminated from consideration if one or both
2265    ///     `Node`(s) has a direct or indirect parent
2266    ///     `BufferCollectionTokenGroup` which selects a child subtree other
2267    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
2268    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
2269    ///   associated with the same buffer collection as the calling `Node`.
2270    ///   Another reason for this error is if the `node_ref` is an
2271    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
2272    ///   a real `node_ref` obtained from `GetNodeRef`.
2273    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
2275    ///   the needed rights expected on a real `node_ref`.
2276    /// * No other failing status codes are returned by this call.  However,
2277    ///   sysmem may add additional codes in future, so the client should have
2278    ///   sensible default handling for any failing status code.
2279    pub fn r#is_alternate_for(
2280        &self,
2281        mut payload: NodeIsAlternateForRequest,
2282        ___deadline: zx::MonotonicInstant,
2283    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
2284        let _response = self.client.send_query::<
2285            NodeIsAlternateForRequest,
2286            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
2287        >(
2288            &mut payload,
2289            0x3a58e00157e0825,
2290            fidl::encoding::DynamicFlags::FLEXIBLE,
2291            ___deadline,
2292        )?
2293        .into_result::<BufferCollectionMarker>("is_alternate_for")?;
2294        Ok(_response.map(|x| x))
2295    }
2296
2297    /// Get the buffer collection ID. This ID is also available from
2298    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
2299    /// within the collection).
2300    ///
2301    /// This call is mainly useful in situations where we can't convey a
2302    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
2303    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
2304    /// handle, which can be joined back up with a `BufferCollection` client end
2305    /// that was created via a different path. Prefer to convey a
2306    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
2307    ///
2308    /// Trusting a `buffer_collection_id` value from a source other than sysmem
2309    /// is analogous to trusting a koid value from a source other than zircon.
2310    /// Both should be avoided unless really necessary, and both require
2311    /// caution. In some situations it may be reasonable to refer to a
2312    /// pre-established `BufferCollection` by `buffer_collection_id` via a
2313    /// protocol for efficiency reasons, but an incoming value purporting to be
2314    /// a `buffer_collection_id` is not sufficient alone to justify granting the
2315    /// sender of the `buffer_collection_id` any capability. The sender must
2316    /// first prove to a receiver that the sender has/had a VMO or has/had a
2317    /// `BufferCollectionToken` to the same collection by sending a handle that
2318    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
2319    /// `buffer_collection_id` value. The receiver should take care to avoid
2320    /// assuming that a sender had a `BufferCollectionToken` in cases where the
2321    /// sender has only proven that the sender had a VMO.
2322    ///
2323    /// - response `buffer_collection_id` This ID is unique per buffer
2324    ///   collection per boot. Each buffer is uniquely identified by the
2325    ///   `buffer_collection_id` and `buffer_index` together.
2326    pub fn r#get_buffer_collection_id(
2327        &self,
2328        ___deadline: zx::MonotonicInstant,
2329    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
2330        let _response = self.client.send_query::<
2331            fidl::encoding::EmptyPayload,
2332            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
2333        >(
2334            (),
2335            0x77d19a494b78ba8c,
2336            fidl::encoding::DynamicFlags::FLEXIBLE,
2337            ___deadline,
2338        )?
2339        .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
2340        Ok(_response)
2341    }
2342
2343    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
2344    /// created after this message to weak, which means that a client's `Node`
2345    /// client end (or a child created after this message) is not alone
2346    /// sufficient to keep allocated VMOs alive.
2347    ///
2348    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
2349    /// `close_weak_asap`.
2350    ///
2351    /// This message is only permitted before the `Node` becomes ready for
2352    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
2353    ///   * `BufferCollectionToken`: any time
2354    ///   * `BufferCollection`: before `SetConstraints`
2355    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
2356    ///
2357    /// Currently, no conversion from strong `Node` to weak `Node` after ready
2358    /// for allocation is provided, but a client can simulate that by creating
2359    /// an additional `Node` before allocation and setting that additional
2360    /// `Node` to weak, and then potentially at some point later sending
2361    /// `Release` and closing the client end of the client's strong `Node`, but
2362    /// keeping the client's weak `Node`.
2363    ///
2364    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
2365    /// collection failure (all `Node` client end(s) will see
2366    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
2367    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
2368    /// this situation until all `Node`(s) are ready for allocation. For initial
2369    /// allocation to succeed, at least one strong `Node` is required to exist
2370    /// at allocation time, but after that client receives VMO handles, that
2371    /// client can `BufferCollection.Release` and close the client end without
2372    /// causing this type of failure.
2373    ///
2374    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
2375    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
2376    /// separately as appropriate.
2377    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
2378        self.client.send::<fidl::encoding::EmptyPayload>(
2379            (),
2380            0x22dd3ea514eeffe1,
2381            fidl::encoding::DynamicFlags::FLEXIBLE,
2382        )
2383    }
2384
2385    /// This indicates to sysmem that the client is prepared to pay attention to
2386    /// `close_weak_asap`.
2387    ///
2388    /// If sent, this message must be before
2389    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
2390    ///
2391    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
2392    /// send this message before `WaitForAllBuffersAllocated`, or a parent
2393    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
2394    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
2395    /// trigger buffer collection failure.
2396    ///
2397    /// This message is necessary because weak sysmem VMOs have not always been
2398    /// a thing, so older clients are not aware of the need to pay attention to
2399    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
2400    /// sysmem weak VMO handles asap. By having this message and requiring
2401    /// participants to indicate their acceptance of this aspect of the overall
2402    /// protocol, we avoid situations where an older client is delivered a weak
2403    /// VMO without any way for sysmem to get that VMO to close quickly later
2404    /// (and on a per-buffer basis).
2405    ///
2406    /// A participant that doesn't handle `close_weak_asap` and also doesn't
2407    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
2408    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
2409    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
2410    /// same participant has a child/delegate which does retrieve VMOs, that
2411    /// child/delegate will need to send `SetWeakOk` before
2412    /// `WaitForAllBuffersAllocated`.
2413    ///
2414    /// + request `for_child_nodes_also` If present and true, this means direct
2415    ///   child nodes of this node created after this message plus all
2416    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
2417    ///   those nodes. Any child node of this node that was created before this
2418    ///   message is not included. This setting is "sticky" in the sense that a
2419    ///   subsequent `SetWeakOk` without this bool set to true does not reset
2420    ///   the server-side bool. If this creates a problem for a participant, a
2421    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
2422    ///   tokens instead, as appropriate. A participant should only set
2423    ///   `for_child_nodes_also` true if the participant can really promise to
2424    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
2425    ///   weak VMO handles held by participants holding the corresponding child
2426    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
2427    ///   which are using sysmem(1) can be weak, despite the clients of those
2428    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
2429    ///   direct way to find out about `close_weak_asap`. This only applies to
2430    ///   descendents of this `Node` which are using sysmem(1), not to this
2431    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
2432    ///   token, which will fail allocation unless an ancestor of this `Node`
2433    ///   specified `for_child_nodes_also` true.
2434    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
2435        self.client.send::<NodeSetWeakOkRequest>(
2436            &mut payload,
2437            0x38a44fc4d7724be9,
2438            fidl::encoding::DynamicFlags::FLEXIBLE,
2439        )
2440    }
2441
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
2444    /// reservation by a different `Node` via
2445    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
2446    ///
2447    /// The `Node` buffer counts may not be released until the entire tree of
2448    /// `Node`(s) is closed or failed, because
2449    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
2450    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
2451    /// `Node` buffer counts remain reserved until the orphaned node is later
2452    /// cleaned up.
2453    ///
2454    /// If the `Node` exceeds a fairly large number of attached eventpair server
2455    /// ends, a log message will indicate this and the `Node` (and the
2456    /// appropriate) sub-tree will fail.
2457    ///
2458    /// The `server_end` will remain open when
2459    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
2460    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
2461    /// [`fuchsia.sysmem2/BufferCollection`].
2462    ///
2463    /// This message can also be used with a
2464    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
2465    pub fn r#attach_node_tracking(
2466        &self,
2467        mut payload: NodeAttachNodeTrackingRequest,
2468    ) -> Result<(), fidl::Error> {
2469        self.client.send::<NodeAttachNodeTrackingRequest>(
2470            &mut payload,
2471            0x3f22f2a293d3cdac,
2472            fidl::encoding::DynamicFlags::FLEXIBLE,
2473        )
2474    }
2475
2476    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
2477    /// collection.
2478    ///
2479    /// A participant may only call
2480    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
2481    /// [`fuchsia.sysmem2/BufferCollection`].
2482    ///
2483    /// For buffer allocation to be attempted, all holders of a
2484    /// `BufferCollection` client end need to call `SetConstraints` before
2485    /// sysmem will attempt to allocate buffers.
2486    ///
2487    /// + request `constraints` These are the constraints on the buffer
2488    ///   collection imposed by the sending client/participant.  The
2489    ///   `constraints` field is not required to be set. If not set, the client
2490    ///   is not setting any actual constraints, but is indicating that the
2491    ///   client has no constraints to set. A client that doesn't set the
2492    ///   `constraints` field won't receive any VMO handles, but can still find
2493    ///   out how many buffers were allocated and can still refer to buffers by
2494    ///   their `buffer_index`.
2495    pub fn r#set_constraints(
2496        &self,
2497        mut payload: BufferCollectionSetConstraintsRequest,
2498    ) -> Result<(), fidl::Error> {
2499        self.client.send::<BufferCollectionSetConstraintsRequest>(
2500            &mut payload,
2501            0x1fde0f19d650197b,
2502            fidl::encoding::DynamicFlags::FLEXIBLE,
2503        )
2504    }
2505
2506    /// Wait until all buffers are allocated.
2507    ///
2508    /// This FIDL call completes when buffers have been allocated, or completes
2509    /// with some failure detail if allocation has been attempted but failed.
2510    ///
2511    /// The following must occur before buffers will be allocated:
2512    ///   * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
2513    ///     collection must be turned in via `BindSharedCollection` to get a
2514    ///     [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
2515    ///     [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
2516    ///     or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
2517    ///     to them.
2518    ///   * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
2519    ///     must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
2520    ///     sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
2521    ///     sent to them.
2522    ///
2523    /// - result `buffer_collection_info` The VMO handles and other related
2524    ///   info.
2525    /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
2526    ///   cannot be fulfilled due to resource exhaustion.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The request is
2528    ///   malformed.
    /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY]` The
2530    ///   request is valid but cannot be satisfied, perhaps due to hardware
2531    ///   limitations. This can happen if participants have incompatible
2532    ///   constraints (empty intersection, roughly speaking). See the log for
2533    ///   more info. In cases where a participant could potentially be treated
2534    ///   as optional, see [`BufferCollectionTokenGroup`]. When using
2535    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
2536    ///   error code if there aren't enough buffers in the pre-existing
2537    ///   collection to satisfy the constraints set on the attached token and
2538    ///   any sub-tree of tokens derived from the attached token.
2539    pub fn r#wait_for_all_buffers_allocated(
2540        &self,
2541        ___deadline: zx::MonotonicInstant,
2542    ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
2543        let _response = self
2544            .client
2545            .send_query::<fidl::encoding::EmptyPayload, fidl::encoding::FlexibleResultType<
2546                BufferCollectionWaitForAllBuffersAllocatedResponse,
2547                Error,
2548            >>(
2549                (), 0x62300344b61404e, fidl::encoding::DynamicFlags::FLEXIBLE, ___deadline
2550            )?
2551            .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
2552        Ok(_response.map(|x| x))
2553    }
2554
2555    /// Checks whether all the buffers have been allocated, in a polling
2556    /// fashion.
2557    ///
2558    /// * If the buffer collection has been allocated, returns success.
2559    /// * If the buffer collection failed allocation, returns the same
2560    ///   [`fuchsia.sysmem2/Error`] as
2561    ///   [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
2562    ///   return.
2563    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
2564    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
2565    ///   would not respond quickly.
2566    pub fn r#check_all_buffers_allocated(
2567        &self,
2568        ___deadline: zx::MonotonicInstant,
2569    ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
2570        let _response = self.client.send_query::<
2571            fidl::encoding::EmptyPayload,
2572            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
2573        >(
2574            (),
2575            0x35a5fe77ce939c10,
2576            fidl::encoding::DynamicFlags::FLEXIBLE,
2577            ___deadline,
2578        )?
2579        .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
2580        Ok(_response.map(|x| x))
2581    }
2582
2583    /// Create a new token to add a new participant to an existing logical
2584    /// buffer collection, if the existing collection's buffer counts,
2585    /// constraints, and participants allow.
2586    ///
2587    /// This can be useful in replacing a failed participant, and/or in
2588    /// adding/re-adding a participant after buffers have already been
2589    /// allocated.
2590    ///
2591    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
2592    /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
2593    /// goes through the normal procedure of setting constraints or closing
2594    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
2595    /// clients' point of view, despite the possibility that all the buffers
2596    /// were actually allocated previously. This process is called "logical
2597    /// allocation". Most instances of "allocation" in docs for other messages
2598    /// can also be read as "allocation or logical allocation" while remaining
2599    /// valid, but we just say "allocation" in most places for brevity/clarity
2600    /// of explanation, with the details of "logical allocation" left for the
2601    /// docs here on `AttachToken`.
2602    ///
2603    /// Failure of an attached `Node` does not propagate to the parent of the
2604    /// attached `Node`. More generally, failure of a child `Node` is blocked
2605    /// from reaching its parent `Node` if the child is attached, or if the
2606    /// child is dispensable and the failure occurred after logical allocation
2607    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
2608    ///
2609    /// A participant may in some scenarios choose to initially use a
2610    /// dispensable token for a given instance of a delegate participant, and
2611    /// then later if the first instance of that delegate participant fails, a
2612    /// new second instance of that delegate participant may be given a token
2613    /// created with `AttachToken`.
2614    ///
2615    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
2616    /// client end, the token acts like any other token. The client can
2617    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
2618    /// and can send the token to a different process/participant. The
2619    /// `BufferCollectionToken` `Node` should be converted to a
2620    /// `BufferCollection` `Node` as normal by sending
2621    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
2622    /// without causing subtree failure by sending
2623    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
2624    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
2625    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
2626    /// the `BufferCollection`.
2627    ///
2628    /// Within the subtree, a success result from
2629    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
2630    /// the subtree participants' constraints were satisfiable using the
2631    /// already-existing buffer collection, the already-established
2632    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
2633    /// constraints, and the already-existing other participants (already added
2634    /// via successful logical allocation) and their specified buffer counts in
2635    /// their constraints. A failure result means the new participants'
2636    /// constraints cannot be satisfied using the existing buffer collection and
2637    /// its already-added participants. Creating a new collection instead may
2638    /// allow all participants' constraints to be satisfied, assuming
2639    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
2640    /// used.
2641    ///
2642    /// A token created with `AttachToken` performs constraints aggregation with
2643    /// all constraints currently in effect on the buffer collection, plus the
2644    /// attached token under consideration plus child tokens under the attached
2645    /// token which are not themselves an attached token or under such a token.
2646    /// Further subtrees under this subtree are considered for logical
2647    /// allocation only after this subtree has completed logical allocation.
2648    ///
2649    /// Assignment of existing buffers to participants'
2650    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
2651    /// etc is first-come first-served, but a child can't logically allocate
2652    /// before all its parents have sent `SetConstraints`.
2653    ///
2654    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
2655    /// in contrast to `AttachToken`, has the created token `Node` + child
2656    /// `Node`(s) (in the created subtree but not in any subtree under this
2657    /// subtree) participate in constraints aggregation along with its parent
2658    /// during the parent's allocation or logical allocation.
2659    ///
2660    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
2661    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
2662    /// sysmem before the new token can be passed to `BindSharedCollection`. The
2663    /// `Sync` of the new token can be accomplished with
2664    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
2665    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
2666    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
2667    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
2668    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
2669    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
2670    /// created token, to also sync those additional tokens to sysmem using a
2671    /// single round-trip.
2672    ///
2673    /// All table fields are currently required.
2674    ///
2675    /// + request `rights_attenuation_mask` This allows attenuating the VMO
2676    ///   rights of the subtree. These values for `rights_attenuation_mask`
2677    ///   result in no attenuation (note that 0 is not on this list):
2678    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
2679    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
2680    /// + request `token_request` The server end of the `BufferCollectionToken`
2681    ///   channel. The client retains the client end.
2682    pub fn r#attach_token(
2683        &self,
2684        mut payload: BufferCollectionAttachTokenRequest,
2685    ) -> Result<(), fidl::Error> {
2686        self.client.send::<BufferCollectionAttachTokenRequest>(
2687            &mut payload,
2688            0x46ac7d0008492982,
2689            fidl::encoding::DynamicFlags::FLEXIBLE,
2690        )
2691    }
2692
2693    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
2694    /// buffers have been allocated and only the specified number of buffers (or
2695    /// fewer) remain in the buffer collection.
2696    ///
2697    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
2698    /// client to wait until an old buffer collection is fully or mostly
2699    /// deallocated before attempting allocation of a new buffer collection. The
2700    /// eventpair is only signalled when the buffers of this collection have
2701    /// been fully deallocated (not just un-referenced by clients, but all the
2702    /// memory consumed by those buffers has been fully reclaimed/recycled), or
2703    /// when allocation or logical allocation fails for the tree or subtree
2704    /// including this [`fuchsia.sysmem2/BufferCollection`].
2705    ///
2706    /// The eventpair won't be signalled until allocation or logical allocation
2707    /// has completed; until then, the collection's current buffer count is
2708    /// ignored.
2709    ///
2710    /// If logical allocation fails for an attached subtree (using
2711    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
2712    /// eventpair will close during that failure regardless of the number of
2713    /// buffers potentially allocated in the overall buffer collection. This is
2714    /// for logical allocation consistency with normal allocation.
2715    ///
2716    /// The lifetime signalled by this event includes asynchronous cleanup of
2717    /// allocated buffers, and this asynchronous cleanup cannot occur until all
2718    /// holders of VMO handles to the buffers have closed those VMO handles.
2719    /// Therefore, clients should take care not to become blocked forever
2720    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
2721    /// participants using the logical buffer collection (including the waiter
2722    /// itself) are less trusted, less reliable, or potentially blocked by the
2723    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
2724    /// for the client wait may be prudent, depending on details of how the
2725    /// collection and/or its VMOs are used or shared. Failure to allocate a
2726    /// new/replacement buffer collection is better than getting stuck forever.
2727    ///
2728    /// The sysmem server itself intentionally does not perform any waiting on
2729    /// already-failed collections' VMOs to finish cleaning up before attempting
2730    /// a new allocation, and the sysmem server intentionally doesn't retry
2731    /// allocation if a new allocation fails due to out of memory, even if that
2732    /// failure is potentially due to continued existence of an old collection's
2733    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
2734    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
2735    /// as long as the waiting client is careful to not create a deadlock.
2736    ///
2737    /// Continued existence of old collections that are still cleaning up is not
2738    /// the only reason that a new allocation may fail due to insufficient
2739    /// memory, even if the new allocation is allocating physically contiguous
2740    /// buffers. Overall system memory pressure can also be the cause of failure
2741    /// to allocate a new collection. See also
2742    /// [`fuchsia.memorypressure/Provider`].
2743    ///
2744    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
2745    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
2746    /// `eventpair` handle (server end) can be sent via more than one
2747    /// `AttachLifetimeTracking` message to different protocols, and the
2748    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
2749    /// the conditions are met (all holders of duplicates have closed their
2750    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
2751    /// client end can (also) be duplicated without preventing the
2752    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
2753    ///
2754    /// The server intentionally doesn't "trust" any signals set on the
2755    /// `server_end`. This mechanism intentionally uses only
2756    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
2757    /// "early", and is only set when all handles to the server end eventpair
2758    /// are closed. No meaning is associated with any of the other signals, and
2759    /// clients should ignore any other signal bits on either end of the
2760    /// `eventpair`.
2761    ///
2762    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
2763    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
2764    /// transfer without causing `BufferCollection` channel failure).
2765    ///
2766    /// All table fields are currently required.
2767    ///
2768    /// + request `server_end` This eventpair handle will be closed by the
2769    ///   sysmem server when buffers have been allocated initially and the
2770    ///   number of buffers is then less than or equal to `buffers_remaining`.
2771    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
2772    ///   fewer) buffers to be fully deallocated. A number greater than zero can
2773    ///   be useful in situations where a known number of buffers are
2774    ///   intentionally not closed so that the data can continue to be used,
2775    ///   such as for keeping the last available video frame displayed in the UI
2776    ///   even if the video stream was using protected output buffers. It's
2777    ///   outside the scope of the `BufferCollection` interface (at least for
2778    ///   now) to determine how many buffers may be held without closing, but
2779    ///   it'll typically be in the range 0-2.
2780    pub fn r#attach_lifetime_tracking(
2781        &self,
2782        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
2783    ) -> Result<(), fidl::Error> {
2784        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
2785            &mut payload,
2786            0x3ecb510113116dcf,
2787            fidl::encoding::DynamicFlags::FLEXIBLE,
2788        )
2789    }
2790}
2791
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionSynchronousProxy> for zx::NullableHandle {
    /// Consumes the proxy and converts its underlying channel into a handle.
    fn from(value: BufferCollectionSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
2798
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionSynchronousProxy::new(value)
    }
}
2805
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionSynchronousProxy {
    type Protocol = BufferCollectionMarker;

    /// Builds a synchronous proxy from a typed client endpoint by unwrapping
    /// it to its raw channel.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
2814
/// Asynchronous client proxy for the `fuchsia.sysmem2/BufferCollection`
/// protocol (see `BufferCollectionProxy::new`).
#[derive(Debug, Clone)]
pub struct BufferCollectionProxy {
    // Async FIDL client wrapping the channel; all request methods below
    // delegate to it.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
2819
2820impl fidl::endpoints::Proxy for BufferCollectionProxy {
2821    type Protocol = BufferCollectionMarker;
2822
2823    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
2824        Self::new(inner)
2825    }
2826
2827    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
2828        self.client.into_channel().map_err(|client| Self { client })
2829    }
2830
2831    fn as_channel(&self) -> &::fidl::AsyncChannel {
2832        self.client.as_channel()
2833    }
2834}
2835
2836impl BufferCollectionProxy {
2837    /// Create a new Proxy for fuchsia.sysmem2/BufferCollection.
2838    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
2839        let protocol_name = <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
2840        Self { client: fidl::client::Client::new(channel, protocol_name) }
2841    }
2842
2843    /// Get a Stream of events from the remote end of the protocol.
2844    ///
2845    /// # Panics
2846    ///
2847    /// Panics if the event stream was already taken.
2848    pub fn take_event_stream(&self) -> BufferCollectionEventStream {
2849        BufferCollectionEventStream { event_receiver: self.client.take_event_receiver() }
2850    }
2851
2852    /// Ensure that previous messages have been received server side. This is
2853    /// particularly useful after previous messages that created new tokens,
2854    /// because a token must be known to the sysmem server before sending the
2855    /// token to another participant.
2856    ///
2857    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
2858    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
2859    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
2860    /// to mitigate the possibility of a hostile/fake
2861    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
2862    /// Another way is to pass the token to
2863    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
2864    /// the token as part of exchanging it for a
2865    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
2866    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
2867    /// of stalling.
2868    ///
2869    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
2870    /// and then starting and completing a `Sync`, it's then safe to send the
2871    /// `BufferCollectionToken` client ends to other participants knowing the
2872    /// server will recognize the tokens when they're sent by the other
2873    /// participants to sysmem in a
2874    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
2875    /// efficient way to create tokens while avoiding unnecessary round trips.
2876    ///
2877    /// Other options include waiting for each
2878    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
2879    /// individually (using separate call to `Sync` after each), or calling
2880    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
2881    /// converted to a `BufferCollection` via
2882    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
2883    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
2884    /// the sync step and can create multiple tokens at once.
2885    pub fn r#sync(
2886        &self,
2887    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
2888        BufferCollectionProxyInterface::r#sync(self)
2889    }
2890
2891    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
2892    ///
2893    /// Normally a participant will convert a `BufferCollectionToken` into a
2894    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
2895    /// `Release` via the token (and then close the channel immediately or
2896    /// shortly later in response to server closing the server end), which
2897    /// avoids causing buffer collection failure. Without a prior `Release`,
2898    /// closing the `BufferCollectionToken` client end will cause buffer
2899    /// collection failure.
2900    ///
2901    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
2902    ///
2903    /// By default the server handles unexpected closure of a
2904    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
2905    /// first) by failing the buffer collection. Partly this is to expedite
2906    /// closing VMO handles to reclaim memory when any participant fails. If a
2907    /// participant would like to cleanly close a `BufferCollection` without
2908    /// causing buffer collection failure, the participant can send `Release`
2909    /// before closing the `BufferCollection` client end. The `Release` can
2910    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
2911    /// buffer collection won't require constraints from this node in order to
2912    /// allocate. If after `SetConstraints`, the constraints are retained and
2913    /// aggregated, despite the lack of `BufferCollection` connection at the
2914    /// time of constraints aggregation.
2915    ///
2916    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
2917    ///
2918    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
2919    /// end (without `Release` first) will trigger failure of the buffer
2920    /// collection. To close a `BufferCollectionTokenGroup` channel without
2921    /// failing the buffer collection, ensure that AllChildrenPresent() has been
2922    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
2923    /// client end.
2924    ///
2925    /// If `Release` occurs before
2926    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
2927    /// buffer collection will fail (triggered by reception of `Release` without
2928    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
2929    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
2930    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
2931    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
2932    /// close requires `AllChildrenPresent` (if not already sent), then
2933    /// `Release`, then close client end.
2934    ///
2935    /// If `Release` occurs after `AllChildrenPresent`, the children and all
2936    /// their constraints remain intact (just as they would if the
2937    /// `BufferCollectionTokenGroup` channel had remained open), and the client
2938    /// end close doesn't trigger buffer collection failure.
2939    ///
2940    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
2941    ///
2942    /// For brevity, the per-channel-protocol paragraphs above ignore the
2943    /// separate failure domain created by
2944    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
2945    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
2946    /// unexpectedly closes (without `Release` first) and that client end is
2947    /// under a failure domain, instead of failing the whole buffer collection,
2948    /// the failure domain is failed, but the buffer collection itself is
2949    /// isolated from failure of the failure domain. Such failure domains can be
2950    /// nested, in which case only the inner-most failure domain in which the
2951    /// `Node` resides fails.
2952    pub fn r#release(&self) -> Result<(), fidl::Error> {
2953        BufferCollectionProxyInterface::r#release(self)
2954    }
2955
2956    /// Set a name for VMOs in this buffer collection.
2957    ///
2958    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
2959    /// will be truncated to fit. The name of the vmo will be suffixed with the
2960    /// buffer index within the collection (if the suffix fits within
2961    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
2962    /// listed in the inspect data.
2963    ///
2964    /// The name only affects VMOs allocated after the name is set; this call
2965    /// does not rename existing VMOs. If multiple clients set different names
2966    /// then the larger priority value will win. Setting a new name with the
2967    /// same priority as a prior name doesn't change the name.
2968    ///
2969    /// All table fields are currently required.
2970    ///
2971    /// + request `priority` The name is only set if this is the first `SetName`
2972    ///   or if `priority` is greater than any previous `priority` value in
2973    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
2974    /// + request `name` The name for VMOs created under this buffer collection.
2975    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
2976        BufferCollectionProxyInterface::r#set_name(self, payload)
2977    }
2978
2979    /// Set information about the current client that can be used by sysmem to
2980    /// help diagnose leaking memory and allocation stalls waiting for a
2981    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
2982    ///
2983    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
2984    /// `Node`(s) derived from this `Node`, unless overridden by
2985    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
2986    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
2987    ///
2988    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
2989    /// `Allocator` is the most efficient way to ensure that all
2990    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
2991    /// set, and is also more efficient than separately sending the same debug
2992    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
2993    /// created [`fuchsia.sysmem2/Node`].
2994    ///
2995    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
2996    /// indicate which client is closing their channel first, leading to subtree
2997    /// failure (which can be normal if the purpose of the subtree is over, but
2998    /// if happening earlier than expected, the client-channel-specific name can
2999    /// help diagnose where the failure is first coming from, from sysmem's
3000    /// point of view).
3001    ///
3002    /// All table fields are currently required.
3003    ///
3004    /// + request `name` This can be an arbitrary string, but the current
3005    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
3006    /// + request `id` This can be an arbitrary id, but the current process ID
3007    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
3008    pub fn r#set_debug_client_info(
3009        &self,
3010        mut payload: &NodeSetDebugClientInfoRequest,
3011    ) -> Result<(), fidl::Error> {
3012        BufferCollectionProxyInterface::r#set_debug_client_info(self, payload)
3013    }
3014
3015    /// Sysmem logs a warning if sysmem hasn't seen
3016    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
3017    /// within 5 seconds after creation of a new collection.
3018    ///
3019    /// Clients can call this method to change when the log is printed. If
3020    /// multiple clients set the deadline, it's unspecified which deadline will
3021    /// take effect.
3022    ///
3023    /// In most cases the default works well.
3024    ///
3025    /// All table fields are currently required.
3026    ///
3027    /// + request `deadline` The time at which sysmem will start trying to log
3028    ///   the warning, unless all constraints are with sysmem by then.
3029    pub fn r#set_debug_timeout_log_deadline(
3030        &self,
3031        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
3032    ) -> Result<(), fidl::Error> {
3033        BufferCollectionProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
3034    }
3035
3036    /// This enables verbose logging for the buffer collection.
3037    ///
3038    /// Verbose logging includes constraints set via
3039    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
3040    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
3041    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
3042    /// the tree of `Node`(s).
3043    ///
3044    /// Normally sysmem prints only a single line complaint when aggregation
3045    /// fails, with just the specific detailed reason that aggregation failed,
3046    /// with little surrounding context.  While this is often enough to diagnose
3047    /// a problem if only a small change was made and everything was working
3048    /// before the small change, it's often not particularly helpful for getting
3049    /// a new buffer collection to work for the first time.  Especially with
3050    /// more complex trees of nodes, involving things like
3051    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
3052    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
3053    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
3054    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
3055    /// looks like and why it's failing a logical allocation, or why a tree or
3056    /// subtree is failing sooner than expected.
3057    ///
3058    /// The intent of the extra logging is to be acceptable from a performance
3059    /// point of view, under the assumption that verbose logging is only enabled
3060    /// on a low number of buffer collections. If we're not tracking down a bug,
3061    /// we shouldn't send this message.
3062    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
3063        BufferCollectionProxyInterface::r#set_verbose_logging(self)
3064    }
3065
3066    /// This gets a handle that can be used as a parameter to
3067    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
3068    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
3069    /// client obtained this handle from this `Node`.
3070    ///
3071    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
3072    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
3073    /// despite the two calls typically being on different channels.
3074    ///
3075    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
3076    ///
3077    /// All table fields are currently required.
3078    ///
3079    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
3080    ///   different `Node` channel, to prove that the client obtained the handle
3081    ///   from this `Node`.
3082    pub fn r#get_node_ref(
3083        &self,
3084    ) -> fidl::client::QueryResponseFut<
3085        NodeGetNodeRefResponse,
3086        fidl::encoding::DefaultFuchsiaResourceDialect,
3087    > {
3088        BufferCollectionProxyInterface::r#get_node_ref(self)
3089    }
3090
3091    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
3092    /// rooted at a different child token of a common parent
3093    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
3094    /// passed-in `node_ref`.
3095    ///
3096    /// This call is for assisting with admission control de-duplication, and
3097    /// with debugging.
3098    ///
3099    /// The `node_ref` must be obtained using
3100    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
3101    ///
3102    /// The `node_ref` can be a duplicated handle; it's not necessary to call
3103    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
3104    ///
3105    /// If a calling token may not actually be a valid token at all due to a
3106    /// potentially hostile/untrusted provider of the token, call
3107    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
3108    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
3109    /// never responds due to a calling token not being a real token (not really
3110    /// talking to sysmem).  Another option is to call
3111    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
3112    /// which also validates the token along with converting it to a
3113    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
3114    ///
3115    /// All table fields are currently required.
3116    ///
3117    /// - response `is_alternate`
3118    ///   - true: The first parent node in common between the calling node and
3119    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
3120    ///     that the calling `Node` and the `node_ref` `Node` will not have both
3121    ///     their constraints apply - rather sysmem will choose one or the other
3122    ///     of the constraints - never both.  This is because only one child of
3123    ///     a `BufferCollectionTokenGroup` is selected during logical
3124    ///     allocation, with only that one child's subtree contributing to
3125    ///     constraints aggregation.
3126    ///   - false: The first parent node in common between the calling `Node`
3127    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
3128    ///     Currently, this means the first parent node in common is a
3129    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
3130    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
3131    ///     `Node` may have both their constraints apply during constraints
3132    ///     aggregation of the logical allocation, if both `Node`(s) are
3133    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
3134    ///     this case, there is no `BufferCollectionTokenGroup` that will
3135    ///     directly prevent the two `Node`(s) from both being selected and
3136    ///     their constraints both aggregated, but even when false, one or both
3137    ///     `Node`(s) may still be eliminated from consideration if one or both
3138    ///     `Node`(s) has a direct or indirect parent
3139    ///     `BufferCollectionTokenGroup` which selects a child subtree other
3140    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
3141    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
3142    ///   associated with the same buffer collection as the calling `Node`.
3143    ///   Another reason for this error is if the `node_ref` is an
3144    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
3145    ///   a real `node_ref` obtained from `GetNodeRef`.
3146    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
3148    ///   the needed rights expected on a real `node_ref`.
3149    /// * No other failing status codes are returned by this call.  However,
3150    ///   sysmem may add additional codes in future, so the client should have
3151    ///   sensible default handling for any failing status code.
3152    pub fn r#is_alternate_for(
3153        &self,
3154        mut payload: NodeIsAlternateForRequest,
3155    ) -> fidl::client::QueryResponseFut<
3156        NodeIsAlternateForResult,
3157        fidl::encoding::DefaultFuchsiaResourceDialect,
3158    > {
3159        BufferCollectionProxyInterface::r#is_alternate_for(self, payload)
3160    }
3161
3162    /// Get the buffer collection ID. This ID is also available from
3163    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
3164    /// within the collection).
3165    ///
3166    /// This call is mainly useful in situations where we can't convey a
3167    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
3168    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
3169    /// handle, which can be joined back up with a `BufferCollection` client end
3170    /// that was created via a different path. Prefer to convey a
3171    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
3172    ///
3173    /// Trusting a `buffer_collection_id` value from a source other than sysmem
3174    /// is analogous to trusting a koid value from a source other than zircon.
3175    /// Both should be avoided unless really necessary, and both require
3176    /// caution. In some situations it may be reasonable to refer to a
3177    /// pre-established `BufferCollection` by `buffer_collection_id` via a
3178    /// protocol for efficiency reasons, but an incoming value purporting to be
3179    /// a `buffer_collection_id` is not sufficient alone to justify granting the
3180    /// sender of the `buffer_collection_id` any capability. The sender must
3181    /// first prove to a receiver that the sender has/had a VMO or has/had a
3182    /// `BufferCollectionToken` to the same collection by sending a handle that
3183    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
3184    /// `buffer_collection_id` value. The receiver should take care to avoid
3185    /// assuming that a sender had a `BufferCollectionToken` in cases where the
3186    /// sender has only proven that the sender had a VMO.
3187    ///
3188    /// - response `buffer_collection_id` This ID is unique per buffer
3189    ///   collection per boot. Each buffer is uniquely identified by the
3190    ///   `buffer_collection_id` and `buffer_index` together.
3191    pub fn r#get_buffer_collection_id(
3192        &self,
3193    ) -> fidl::client::QueryResponseFut<
3194        NodeGetBufferCollectionIdResponse,
3195        fidl::encoding::DefaultFuchsiaResourceDialect,
3196    > {
3197        BufferCollectionProxyInterface::r#get_buffer_collection_id(self)
3198    }
3199
3200    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
3201    /// created after this message to weak, which means that a client's `Node`
3202    /// client end (or a child created after this message) is not alone
3203    /// sufficient to keep allocated VMOs alive.
3204    ///
3205    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
3206    /// `close_weak_asap`.
3207    ///
3208    /// This message is only permitted before the `Node` becomes ready for
3209    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
3210    ///   * `BufferCollectionToken`: any time
3211    ///   * `BufferCollection`: before `SetConstraints`
3212    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
3213    ///
3214    /// Currently, no conversion from strong `Node` to weak `Node` after ready
3215    /// for allocation is provided, but a client can simulate that by creating
3216    /// an additional `Node` before allocation and setting that additional
3217    /// `Node` to weak, and then potentially at some point later sending
3218    /// `Release` and closing the client end of the client's strong `Node`, but
3219    /// keeping the client's weak `Node`.
3220    ///
3221    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
3222    /// collection failure (all `Node` client end(s) will see
3223    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
3224    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
3225    /// this situation until all `Node`(s) are ready for allocation. For initial
3226    /// allocation to succeed, at least one strong `Node` is required to exist
3227    /// at allocation time, but after that client receives VMO handles, that
3228    /// client can `BufferCollection.Release` and close the client end without
3229    /// causing this type of failure.
3230    ///
3231    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
3232    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
3233    /// separately as appropriate.
3234    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
3235        BufferCollectionProxyInterface::r#set_weak(self)
3236    }
3237
3238    /// This indicates to sysmem that the client is prepared to pay attention to
3239    /// `close_weak_asap`.
3240    ///
3241    /// If sent, this message must be before
3242    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
3243    ///
3244    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
3245    /// send this message before `WaitForAllBuffersAllocated`, or a parent
3246    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
3247    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
3248    /// trigger buffer collection failure.
3249    ///
3250    /// This message is necessary because weak sysmem VMOs have not always been
3251    /// a thing, so older clients are not aware of the need to pay attention to
3252    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
3253    /// sysmem weak VMO handles asap. By having this message and requiring
3254    /// participants to indicate their acceptance of this aspect of the overall
3255    /// protocol, we avoid situations where an older client is delivered a weak
3256    /// VMO without any way for sysmem to get that VMO to close quickly later
3257    /// (and on a per-buffer basis).
3258    ///
3259    /// A participant that doesn't handle `close_weak_asap` and also doesn't
3260    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
3261    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
3262    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
3263    /// same participant has a child/delegate which does retrieve VMOs, that
3264    /// child/delegate will need to send `SetWeakOk` before
3265    /// `WaitForAllBuffersAllocated`.
3266    ///
3267    /// + request `for_child_nodes_also` If present and true, this means direct
3268    ///   child nodes of this node created after this message plus all
3269    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
3270    ///   those nodes. Any child node of this node that was created before this
3271    ///   message is not included. This setting is "sticky" in the sense that a
3272    ///   subsequent `SetWeakOk` without this bool set to true does not reset
3273    ///   the server-side bool. If this creates a problem for a participant, a
3274    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
3275    ///   tokens instead, as appropriate. A participant should only set
3276    ///   `for_child_nodes_also` true if the participant can really promise to
3277    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
3278    ///   weak VMO handles held by participants holding the corresponding child
3279    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
3280    ///   which are using sysmem(1) can be weak, despite the clients of those
3281    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
3282    ///   direct way to find out about `close_weak_asap`. This only applies to
3283    ///   descendents of this `Node` which are using sysmem(1), not to this
3284    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
3285    ///   token, which will fail allocation unless an ancestor of this `Node`
3286    ///   specified `for_child_nodes_also` true.
3287    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
3288        BufferCollectionProxyInterface::r#set_weak_ok(self, payload)
3289    }
3290
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
3293    /// reservation by a different `Node` via
3294    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
3295    ///
3296    /// The `Node` buffer counts may not be released until the entire tree of
3297    /// `Node`(s) is closed or failed, because
3298    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
3299    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
3300    /// `Node` buffer counts remain reserved until the orphaned node is later
3301    /// cleaned up.
3302    ///
3303    /// If the `Node` exceeds a fairly large number of attached eventpair server
3304    /// ends, a log message will indicate this and the `Node` (and the
3305    /// appropriate) sub-tree will fail.
3306    ///
3307    /// The `server_end` will remain open when
3308    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
3309    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
3310    /// [`fuchsia.sysmem2/BufferCollection`].
3311    ///
3312    /// This message can also be used with a
3313    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
3314    pub fn r#attach_node_tracking(
3315        &self,
3316        mut payload: NodeAttachNodeTrackingRequest,
3317    ) -> Result<(), fidl::Error> {
3318        BufferCollectionProxyInterface::r#attach_node_tracking(self, payload)
3319    }
3320
3321    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
3322    /// collection.
3323    ///
3324    /// A participant may only call
3325    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
3326    /// [`fuchsia.sysmem2/BufferCollection`].
3327    ///
3328    /// For buffer allocation to be attempted, all holders of a
3329    /// `BufferCollection` client end need to call `SetConstraints` before
3330    /// sysmem will attempt to allocate buffers.
3331    ///
3332    /// + request `constraints` These are the constraints on the buffer
3333    ///   collection imposed by the sending client/participant.  The
3334    ///   `constraints` field is not required to be set. If not set, the client
3335    ///   is not setting any actual constraints, but is indicating that the
3336    ///   client has no constraints to set. A client that doesn't set the
3337    ///   `constraints` field won't receive any VMO handles, but can still find
3338    ///   out how many buffers were allocated and can still refer to buffers by
3339    ///   their `buffer_index`.
3340    pub fn r#set_constraints(
3341        &self,
3342        mut payload: BufferCollectionSetConstraintsRequest,
3343    ) -> Result<(), fidl::Error> {
3344        BufferCollectionProxyInterface::r#set_constraints(self, payload)
3345    }
3346
3347    /// Wait until all buffers are allocated.
3348    ///
3349    /// This FIDL call completes when buffers have been allocated, or completes
3350    /// with some failure detail if allocation has been attempted but failed.
3351    ///
3352    /// The following must occur before buffers will be allocated:
3353    ///   * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
3354    ///     collection must be turned in via `BindSharedCollection` to get a
3355    ///     [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
3356    ///     [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
3357    ///     or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
3358    ///     to them.
3359    ///   * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
3360    ///     must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
3361    ///     sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
3362    ///     sent to them.
3363    ///
3364    /// - result `buffer_collection_info` The VMO handles and other related
3365    ///   info.
3366    /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
3367    ///   cannot be fulfilled due to resource exhaustion.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The request is
    ///   malformed.
    /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY]` The
3371    ///   request is valid but cannot be satisfied, perhaps due to hardware
3372    ///   limitations. This can happen if participants have incompatible
3373    ///   constraints (empty intersection, roughly speaking). See the log for
3374    ///   more info. In cases where a participant could potentially be treated
3375    ///   as optional, see [`BufferCollectionTokenGroup`]. When using
3376    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
3377    ///   error code if there aren't enough buffers in the pre-existing
3378    ///   collection to satisfy the constraints set on the attached token and
3379    ///   any sub-tree of tokens derived from the attached token.
3380    pub fn r#wait_for_all_buffers_allocated(
3381        &self,
3382    ) -> fidl::client::QueryResponseFut<
3383        BufferCollectionWaitForAllBuffersAllocatedResult,
3384        fidl::encoding::DefaultFuchsiaResourceDialect,
3385    > {
3386        BufferCollectionProxyInterface::r#wait_for_all_buffers_allocated(self)
3387    }
3388
3389    /// Checks whether all the buffers have been allocated, in a polling
3390    /// fashion.
3391    ///
3392    /// * If the buffer collection has been allocated, returns success.
3393    /// * If the buffer collection failed allocation, returns the same
3394    ///   [`fuchsia.sysmem2/Error`] as
3395    ///   [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
3396    ///   return.
3397    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
3398    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
3399    ///   would not respond quickly.
3400    pub fn r#check_all_buffers_allocated(
3401        &self,
3402    ) -> fidl::client::QueryResponseFut<
3403        BufferCollectionCheckAllBuffersAllocatedResult,
3404        fidl::encoding::DefaultFuchsiaResourceDialect,
3405    > {
3406        BufferCollectionProxyInterface::r#check_all_buffers_allocated(self)
3407    }
3408
3409    /// Create a new token to add a new participant to an existing logical
3410    /// buffer collection, if the existing collection's buffer counts,
3411    /// constraints, and participants allow.
3412    ///
3413    /// This can be useful in replacing a failed participant, and/or in
3414    /// adding/re-adding a participant after buffers have already been
3415    /// allocated.
3416    ///
3417    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
3418    /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
3419    /// goes through the normal procedure of setting constraints or closing
3420    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
3421    /// clients' point of view, despite the possibility that all the buffers
3422    /// were actually allocated previously. This process is called "logical
3423    /// allocation". Most instances of "allocation" in docs for other messages
3424    /// can also be read as "allocation or logical allocation" while remaining
3425    /// valid, but we just say "allocation" in most places for brevity/clarity
3426    /// of explanation, with the details of "logical allocation" left for the
3427    /// docs here on `AttachToken`.
3428    ///
3429    /// Failure of an attached `Node` does not propagate to the parent of the
3430    /// attached `Node`. More generally, failure of a child `Node` is blocked
3431    /// from reaching its parent `Node` if the child is attached, or if the
3432    /// child is dispensable and the failure occurred after logical allocation
3433    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
3434    ///
3435    /// A participant may in some scenarios choose to initially use a
3436    /// dispensable token for a given instance of a delegate participant, and
3437    /// then later if the first instance of that delegate participant fails, a
    /// new second instance of that delegate participant may be given a token
3439    /// created with `AttachToken`.
3440    ///
3441    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
3442    /// client end, the token acts like any other token. The client can
3443    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
3444    /// and can send the token to a different process/participant. The
3445    /// `BufferCollectionToken` `Node` should be converted to a
3446    /// `BufferCollection` `Node` as normal by sending
3447    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
3448    /// without causing subtree failure by sending
3449    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
3450    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
3451    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
3452    /// the `BufferCollection`.
3453    ///
3454    /// Within the subtree, a success result from
3455    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
3456    /// the subtree participants' constraints were satisfiable using the
3457    /// already-existing buffer collection, the already-established
3458    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
3459    /// constraints, and the already-existing other participants (already added
3460    /// via successful logical allocation) and their specified buffer counts in
3461    /// their constraints. A failure result means the new participants'
3462    /// constraints cannot be satisfied using the existing buffer collection and
3463    /// its already-added participants. Creating a new collection instead may
3464    /// allow all participants' constraints to be satisfied, assuming
3465    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
3466    /// used.
3467    ///
3468    /// A token created with `AttachToken` performs constraints aggregation with
3469    /// all constraints currently in effect on the buffer collection, plus the
3470    /// attached token under consideration plus child tokens under the attached
3471    /// token which are not themselves an attached token or under such a token.
3472    /// Further subtrees under this subtree are considered for logical
3473    /// allocation only after this subtree has completed logical allocation.
3474    ///
3475    /// Assignment of existing buffers to participants'
3476    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
3477    /// etc is first-come first-served, but a child can't logically allocate
3478    /// before all its parents have sent `SetConstraints`.
3479    ///
3480    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
3481    /// in contrast to `AttachToken`, has the created token `Node` + child
3482    /// `Node`(s) (in the created subtree but not in any subtree under this
3483    /// subtree) participate in constraints aggregation along with its parent
3484    /// during the parent's allocation or logical allocation.
3485    ///
3486    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
3487    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
3488    /// sysmem before the new token can be passed to `BindSharedCollection`. The
3489    /// `Sync` of the new token can be accomplished with
3490    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
3491    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
3492    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
3493    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
3494    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
3495    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
3496    /// created token, to also sync those additional tokens to sysmem using a
3497    /// single round-trip.
3498    ///
3499    /// All table fields are currently required.
3500    ///
    /// + request `rights_attenuation_mask` This allows attenuating the VMO
3502    ///   rights of the subtree. These values for `rights_attenuation_mask`
3503    ///   result in no attenuation (note that 0 is not on this list):
3504    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
3505    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
3506    /// + request `token_request` The server end of the `BufferCollectionToken`
3507    ///   channel. The client retains the client end.
3508    pub fn r#attach_token(
3509        &self,
3510        mut payload: BufferCollectionAttachTokenRequest,
3511    ) -> Result<(), fidl::Error> {
3512        BufferCollectionProxyInterface::r#attach_token(self, payload)
3513    }
3514
3515    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
3516    /// buffers have been allocated and only the specified number of buffers (or
3517    /// fewer) remain in the buffer collection.
3518    ///
3519    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
3520    /// client to wait until an old buffer collection is fully or mostly
3521    /// deallocated before attempting allocation of a new buffer collection. The
3522    /// eventpair is only signalled when the buffers of this collection have
3523    /// been fully deallocated (not just un-referenced by clients, but all the
3524    /// memory consumed by those buffers has been fully reclaimed/recycled), or
3525    /// when allocation or logical allocation fails for the tree or subtree
3526    /// including this [`fuchsia.sysmem2/BufferCollection`].
3527    ///
3528    /// The eventpair won't be signalled until allocation or logical allocation
3529    /// has completed; until then, the collection's current buffer count is
3530    /// ignored.
3531    ///
3532    /// If logical allocation fails for an attached subtree (using
3533    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
3534    /// eventpair will close during that failure regardless of the number of
    /// buffers potentially allocated in the overall buffer collection. This is
3536    /// for logical allocation consistency with normal allocation.
3537    ///
3538    /// The lifetime signalled by this event includes asynchronous cleanup of
3539    /// allocated buffers, and this asynchronous cleanup cannot occur until all
3540    /// holders of VMO handles to the buffers have closed those VMO handles.
3541    /// Therefore, clients should take care not to become blocked forever
3542    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
3543    /// participants using the logical buffer collection (including the waiter
3544    /// itself) are less trusted, less reliable, or potentially blocked by the
3545    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
3546    /// for the client wait may be prudent, depending on details of how the
3547    /// collection and/or its VMOs are used or shared. Failure to allocate a
3548    /// new/replacement buffer collection is better than getting stuck forever.
3549    ///
3550    /// The sysmem server itself intentionally does not perform any waiting on
3551    /// already-failed collections' VMOs to finish cleaning up before attempting
3552    /// a new allocation, and the sysmem server intentionally doesn't retry
3553    /// allocation if a new allocation fails due to out of memory, even if that
3554    /// failure is potentially due to continued existence of an old collection's
3555    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
3556    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
3557    /// as long as the waiting client is careful to not create a deadlock.
3558    ///
3559    /// Continued existence of old collections that are still cleaning up is not
3560    /// the only reason that a new allocation may fail due to insufficient
3561    /// memory, even if the new allocation is allocating physically contiguous
3562    /// buffers. Overall system memory pressure can also be the cause of failure
3563    /// to allocate a new collection. See also
3564    /// [`fuchsia.memorypressure/Provider`].
3565    ///
3566    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
3567    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
3568    /// `eventpair` handle (server end) can be sent via more than one
3569    /// `AttachLifetimeTracking` message to different protocols, and the
3570    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
3571    /// the conditions are met (all holders of duplicates have closed their
    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
3573    /// client end can (also) be duplicated without preventing the
3574    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
3575    ///
3576    /// The server intentionally doesn't "trust" any signals set on the
3577    /// `server_end`. This mechanism intentionally uses only
3578    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
3579    /// "early", and is only set when all handles to the server end eventpair
3580    /// are closed. No meaning is associated with any of the other signals, and
3581    /// clients should ignore any other signal bits on either end of the
3582    /// `eventpair`.
3583    ///
3584    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
3585    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
3586    /// transfer without causing `BufferCollection` channel failure).
3587    ///
3588    /// All table fields are currently required.
3589    ///
3590    /// + request `server_end` This eventpair handle will be closed by the
3591    ///   sysmem server when buffers have been allocated initially and the
3592    ///   number of buffers is then less than or equal to `buffers_remaining`.
3593    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
3594    ///   fewer) buffers to be fully deallocated. A number greater than zero can
3595    ///   be useful in situations where a known number of buffers are
3596    ///   intentionally not closed so that the data can continue to be used,
3597    ///   such as for keeping the last available video frame displayed in the UI
3598    ///   even if the video stream was using protected output buffers. It's
3599    ///   outside the scope of the `BufferCollection` interface (at least for
3600    ///   now) to determine how many buffers may be held without closing, but
3601    ///   it'll typically be in the range 0-2.
3602    pub fn r#attach_lifetime_tracking(
3603        &self,
3604        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
3605    ) -> Result<(), fidl::Error> {
3606        BufferCollectionProxyInterface::r#attach_lifetime_tracking(self, payload)
3607    }
3608}
3609
// fidlgen-generated client implementation of `BufferCollectionProxyInterface`.
// Two-way methods build a response future via `send_query_and_decode` with a
// local `_decode` helper; one-way methods just encode and send. Every call
// carries the method's 64-bit FIDL ordinal and the FLEXIBLE dynamic flags.
impl BufferCollectionProxyInterface for BufferCollectionProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    // Two-way `Sync` (ordinal 0x11ac2555cf575b54); empty request and response.
    fn r#sync(&self) -> Self::SyncResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `Release` (ordinal 0x6a5cae7d6d6e04c6).
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetName` (ordinal 0xb41f1624f48c1e9).
    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugClientInfo` (ordinal 0x5cde8914608d99b1).
    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugTimeoutLogDeadline` (ordinal 0x716b0af13d5c0806).
    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetVerboseLogging` (ordinal 0x5209c77415b4dfad).
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetNodeRef` (ordinal 0x5b3d0e51614df053).
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `IsAlternateFor` (ordinal 0x3a58e00157e0825); result type uses
    // `FlexibleResultType` because the FIDL method is fallible.
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetBufferCollectionId` (ordinal 0x77d19a494b78ba8c).
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `SetWeak` (ordinal 0x22dd3ea514eeffe1).
    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetWeakOk` (ordinal 0x38a44fc4d7724be9).
    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `AttachNodeTracking` (ordinal 0x3f22f2a293d3cdac).
    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetConstraints` (ordinal 0x1fde0f19d650197b).
    fn r#set_constraints(
        &self,
        mut payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionSetConstraintsRequest>(
            &mut payload,
            0x1fde0f19d650197b,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type WaitForAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionWaitForAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way fallible `WaitForAllBuffersAllocated` (ordinal 0x62300344b61404e).
    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<
                    BufferCollectionWaitForAllBuffersAllocatedResponse,
                    Error,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x62300344b61404e,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            BufferCollectionWaitForAllBuffersAllocatedResult,
        >(
            (),
            0x62300344b61404e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type CheckAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionCheckAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way fallible `CheckAllBuffersAllocated` (ordinal 0x35a5fe77ce939c10);
    // success payload is an empty struct.
    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x35a5fe77ce939c10,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            BufferCollectionCheckAllBuffersAllocatedResult,
        >(
            (),
            0x35a5fe77ce939c10,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `AttachToken` (ordinal 0x46ac7d0008492982).
    fn r#attach_token(
        &self,
        mut payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionAttachTokenRequest>(
            &mut payload,
            0x46ac7d0008492982,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `AttachLifetimeTracking` (ordinal 0x3ecb510113116dcf).
    fn r#attach_lifetime_tracking(
        &self,
        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
            &mut payload,
            0x3ecb510113116dcf,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
3874
/// Stream of events arriving on a `BufferCollection` client channel.
pub struct BufferCollectionEventStream {
    // Receives raw event message buffers from the underlying FIDL channel.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
3878
// Explicitly mark the stream as `Unpin` so callers can poll it without pinning.
impl std::marker::Unpin for BufferCollectionEventStream {}
3880
impl futures::stream::FusedStream for BufferCollectionEventStream {
    // Terminated exactly when the underlying event receiver is terminated.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
3886
3887impl futures::Stream for BufferCollectionEventStream {
3888    type Item = Result<BufferCollectionEvent, fidl::Error>;
3889
3890    fn poll_next(
3891        mut self: std::pin::Pin<&mut Self>,
3892        cx: &mut std::task::Context<'_>,
3893    ) -> std::task::Poll<Option<Self::Item>> {
3894        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
3895            &mut self.event_receiver,
3896            cx
3897        )?) {
3898            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionEvent::decode(buf))),
3899            None => std::task::Poll::Ready(None),
3900        }
3901    }
3902}
3903
/// Events delivered on the fuchsia.sysmem2/BufferCollection protocol.
///
/// As generated here, the protocol defines no known events, so only
/// flexible (unknown-ordinal) events can be observed.
#[derive(Debug)]
pub enum BufferCollectionEvent {
    /// A flexible event with an ordinal this binding does not recognize.
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
3912
3913impl BufferCollectionEvent {
3914    /// Decodes a message buffer as a [`BufferCollectionEvent`].
3915    fn decode(
3916        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
3917    ) -> Result<BufferCollectionEvent, fidl::Error> {
3918        let (bytes, _handles) = buf.split_mut();
3919        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
3920        debug_assert_eq!(tx_header.tx_id, 0);
3921        match tx_header.ordinal {
3922            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
3923                Ok(BufferCollectionEvent::_UnknownEvent { ordinal: tx_header.ordinal })
3924            }
3925            _ => Err(fidl::Error::UnknownOrdinal {
3926                ordinal: tx_header.ordinal,
3927                protocol_name:
3928                    <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
3929            }),
3930        }
3931    }
3932}
3933
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollection.
pub struct BufferCollectionRequestStream {
    // Shared server-side channel state; also handed out via control handles.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has yielded `None`; polling afterwards panics.
    is_terminated: bool,
}
3939
// Explicitly mark the request stream as `Unpin` so it can be polled without pinning.
impl std::marker::Unpin for BufferCollectionRequestStream {}
3941
impl futures::stream::FusedStream for BufferCollectionRequestStream {
    // Reports the terminated flag maintained by `poll_next`/shutdown handling.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
3947
// Server-side plumbing connecting a raw async channel to the typed
// request stream and its control handle.
impl fidl::endpoints::RequestStream for BufferCollectionRequestStream {
    type Protocol = BufferCollectionMarker;
    type ControlHandle = BufferCollectionControlHandle;

    // Wraps a raw channel in fresh serve state; stream starts non-terminated.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    // Control handles share ownership of the serve state via `Arc`.
    fn control_handle(&self) -> Self::ControlHandle {
        BufferCollectionControlHandle { inner: self.inner.clone() }
    }

    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
3974
3975impl futures::Stream for BufferCollectionRequestStream {
3976    type Item = Result<BufferCollectionRequest, fidl::Error>;
3977
3978    fn poll_next(
3979        mut self: std::pin::Pin<&mut Self>,
3980        cx: &mut std::task::Context<'_>,
3981    ) -> std::task::Poll<Option<Self::Item>> {
3982        let this = &mut *self;
3983        if this.inner.check_shutdown(cx) {
3984            this.is_terminated = true;
3985            return std::task::Poll::Ready(None);
3986        }
3987        if this.is_terminated {
3988            panic!("polled BufferCollectionRequestStream after completion");
3989        }
3990        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
3991            |bytes, handles| {
3992                match this.inner.channel().read_etc(cx, bytes, handles) {
3993                    std::task::Poll::Ready(Ok(())) => {}
3994                    std::task::Poll::Pending => return std::task::Poll::Pending,
3995                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
3996                        this.is_terminated = true;
3997                        return std::task::Poll::Ready(None);
3998                    }
3999                    std::task::Poll::Ready(Err(e)) => {
4000                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
4001                            e.into(),
4002                        ))));
4003                    }
4004                }
4005
4006                // A message has been received from the channel
4007                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
4008
4009                std::task::Poll::Ready(Some(match header.ordinal {
4010                    0x11ac2555cf575b54 => {
4011                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4012                        let mut req = fidl::new_empty!(
4013                            fidl::encoding::EmptyPayload,
4014                            fidl::encoding::DefaultFuchsiaResourceDialect
4015                        );
4016                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4017                        let control_handle =
4018                            BufferCollectionControlHandle { inner: this.inner.clone() };
4019                        Ok(BufferCollectionRequest::Sync {
4020                            responder: BufferCollectionSyncResponder {
4021                                control_handle: std::mem::ManuallyDrop::new(control_handle),
4022                                tx_id: header.tx_id,
4023                            },
4024                        })
4025                    }
4026                    0x6a5cae7d6d6e04c6 => {
4027                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4028                        let mut req = fidl::new_empty!(
4029                            fidl::encoding::EmptyPayload,
4030                            fidl::encoding::DefaultFuchsiaResourceDialect
4031                        );
4032                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4033                        let control_handle =
4034                            BufferCollectionControlHandle { inner: this.inner.clone() };
4035                        Ok(BufferCollectionRequest::Release { control_handle })
4036                    }
4037                    0xb41f1624f48c1e9 => {
4038                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4039                        let mut req = fidl::new_empty!(
4040                            NodeSetNameRequest,
4041                            fidl::encoding::DefaultFuchsiaResourceDialect
4042                        );
4043                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
4044                        let control_handle =
4045                            BufferCollectionControlHandle { inner: this.inner.clone() };
4046                        Ok(BufferCollectionRequest::SetName { payload: req, control_handle })
4047                    }
4048                    0x5cde8914608d99b1 => {
4049                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4050                        let mut req = fidl::new_empty!(
4051                            NodeSetDebugClientInfoRequest,
4052                            fidl::encoding::DefaultFuchsiaResourceDialect
4053                        );
4054                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
4055                        let control_handle =
4056                            BufferCollectionControlHandle { inner: this.inner.clone() };
4057                        Ok(BufferCollectionRequest::SetDebugClientInfo {
4058                            payload: req,
4059                            control_handle,
4060                        })
4061                    }
4062                    0x716b0af13d5c0806 => {
4063                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4064                        let mut req = fidl::new_empty!(
4065                            NodeSetDebugTimeoutLogDeadlineRequest,
4066                            fidl::encoding::DefaultFuchsiaResourceDialect
4067                        );
4068                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
4069                        let control_handle =
4070                            BufferCollectionControlHandle { inner: this.inner.clone() };
4071                        Ok(BufferCollectionRequest::SetDebugTimeoutLogDeadline {
4072                            payload: req,
4073                            control_handle,
4074                        })
4075                    }
4076                    0x5209c77415b4dfad => {
4077                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4078                        let mut req = fidl::new_empty!(
4079                            fidl::encoding::EmptyPayload,
4080                            fidl::encoding::DefaultFuchsiaResourceDialect
4081                        );
4082                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4083                        let control_handle =
4084                            BufferCollectionControlHandle { inner: this.inner.clone() };
4085                        Ok(BufferCollectionRequest::SetVerboseLogging { control_handle })
4086                    }
4087                    0x5b3d0e51614df053 => {
4088                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4089                        let mut req = fidl::new_empty!(
4090                            fidl::encoding::EmptyPayload,
4091                            fidl::encoding::DefaultFuchsiaResourceDialect
4092                        );
4093                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4094                        let control_handle =
4095                            BufferCollectionControlHandle { inner: this.inner.clone() };
4096                        Ok(BufferCollectionRequest::GetNodeRef {
4097                            responder: BufferCollectionGetNodeRefResponder {
4098                                control_handle: std::mem::ManuallyDrop::new(control_handle),
4099                                tx_id: header.tx_id,
4100                            },
4101                        })
4102                    }
4103                    0x3a58e00157e0825 => {
4104                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4105                        let mut req = fidl::new_empty!(
4106                            NodeIsAlternateForRequest,
4107                            fidl::encoding::DefaultFuchsiaResourceDialect
4108                        );
4109                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
4110                        let control_handle =
4111                            BufferCollectionControlHandle { inner: this.inner.clone() };
4112                        Ok(BufferCollectionRequest::IsAlternateFor {
4113                            payload: req,
4114                            responder: BufferCollectionIsAlternateForResponder {
4115                                control_handle: std::mem::ManuallyDrop::new(control_handle),
4116                                tx_id: header.tx_id,
4117                            },
4118                        })
4119                    }
4120                    0x77d19a494b78ba8c => {
4121                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4122                        let mut req = fidl::new_empty!(
4123                            fidl::encoding::EmptyPayload,
4124                            fidl::encoding::DefaultFuchsiaResourceDialect
4125                        );
4126                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4127                        let control_handle =
4128                            BufferCollectionControlHandle { inner: this.inner.clone() };
4129                        Ok(BufferCollectionRequest::GetBufferCollectionId {
4130                            responder: BufferCollectionGetBufferCollectionIdResponder {
4131                                control_handle: std::mem::ManuallyDrop::new(control_handle),
4132                                tx_id: header.tx_id,
4133                            },
4134                        })
4135                    }
4136                    0x22dd3ea514eeffe1 => {
4137                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4138                        let mut req = fidl::new_empty!(
4139                            fidl::encoding::EmptyPayload,
4140                            fidl::encoding::DefaultFuchsiaResourceDialect
4141                        );
4142                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4143                        let control_handle =
4144                            BufferCollectionControlHandle { inner: this.inner.clone() };
4145                        Ok(BufferCollectionRequest::SetWeak { control_handle })
4146                    }
4147                    0x38a44fc4d7724be9 => {
4148                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4149                        let mut req = fidl::new_empty!(
4150                            NodeSetWeakOkRequest,
4151                            fidl::encoding::DefaultFuchsiaResourceDialect
4152                        );
4153                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
4154                        let control_handle =
4155                            BufferCollectionControlHandle { inner: this.inner.clone() };
4156                        Ok(BufferCollectionRequest::SetWeakOk { payload: req, control_handle })
4157                    }
4158                    0x3f22f2a293d3cdac => {
4159                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4160                        let mut req = fidl::new_empty!(
4161                            NodeAttachNodeTrackingRequest,
4162                            fidl::encoding::DefaultFuchsiaResourceDialect
4163                        );
4164                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
4165                        let control_handle =
4166                            BufferCollectionControlHandle { inner: this.inner.clone() };
4167                        Ok(BufferCollectionRequest::AttachNodeTracking {
4168                            payload: req,
4169                            control_handle,
4170                        })
4171                    }
4172                    0x1fde0f19d650197b => {
4173                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4174                        let mut req = fidl::new_empty!(
4175                            BufferCollectionSetConstraintsRequest,
4176                            fidl::encoding::DefaultFuchsiaResourceDialect
4177                        );
4178                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionSetConstraintsRequest>(&header, _body_bytes, handles, &mut req)?;
4179                        let control_handle =
4180                            BufferCollectionControlHandle { inner: this.inner.clone() };
4181                        Ok(BufferCollectionRequest::SetConstraints { payload: req, control_handle })
4182                    }
4183                    0x62300344b61404e => {
4184                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4185                        let mut req = fidl::new_empty!(
4186                            fidl::encoding::EmptyPayload,
4187                            fidl::encoding::DefaultFuchsiaResourceDialect
4188                        );
4189                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4190                        let control_handle =
4191                            BufferCollectionControlHandle { inner: this.inner.clone() };
4192                        Ok(BufferCollectionRequest::WaitForAllBuffersAllocated {
4193                            responder: BufferCollectionWaitForAllBuffersAllocatedResponder {
4194                                control_handle: std::mem::ManuallyDrop::new(control_handle),
4195                                tx_id: header.tx_id,
4196                            },
4197                        })
4198                    }
4199                    0x35a5fe77ce939c10 => {
4200                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4201                        let mut req = fidl::new_empty!(
4202                            fidl::encoding::EmptyPayload,
4203                            fidl::encoding::DefaultFuchsiaResourceDialect
4204                        );
4205                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4206                        let control_handle =
4207                            BufferCollectionControlHandle { inner: this.inner.clone() };
4208                        Ok(BufferCollectionRequest::CheckAllBuffersAllocated {
4209                            responder: BufferCollectionCheckAllBuffersAllocatedResponder {
4210                                control_handle: std::mem::ManuallyDrop::new(control_handle),
4211                                tx_id: header.tx_id,
4212                            },
4213                        })
4214                    }
4215                    0x46ac7d0008492982 => {
4216                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4217                        let mut req = fidl::new_empty!(
4218                            BufferCollectionAttachTokenRequest,
4219                            fidl::encoding::DefaultFuchsiaResourceDialect
4220                        );
4221                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachTokenRequest>(&header, _body_bytes, handles, &mut req)?;
4222                        let control_handle =
4223                            BufferCollectionControlHandle { inner: this.inner.clone() };
4224                        Ok(BufferCollectionRequest::AttachToken { payload: req, control_handle })
4225                    }
4226                    0x3ecb510113116dcf => {
4227                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4228                        let mut req = fidl::new_empty!(
4229                            BufferCollectionAttachLifetimeTrackingRequest,
4230                            fidl::encoding::DefaultFuchsiaResourceDialect
4231                        );
4232                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachLifetimeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
4233                        let control_handle =
4234                            BufferCollectionControlHandle { inner: this.inner.clone() };
4235                        Ok(BufferCollectionRequest::AttachLifetimeTracking {
4236                            payload: req,
4237                            control_handle,
4238                        })
4239                    }
4240                    _ if header.tx_id == 0
4241                        && header
4242                            .dynamic_flags()
4243                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
4244                    {
4245                        Ok(BufferCollectionRequest::_UnknownMethod {
4246                            ordinal: header.ordinal,
4247                            control_handle: BufferCollectionControlHandle {
4248                                inner: this.inner.clone(),
4249                            },
4250                            method_type: fidl::MethodType::OneWay,
4251                        })
4252                    }
4253                    _ if header
4254                        .dynamic_flags()
4255                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
4256                    {
4257                        this.inner.send_framework_err(
4258                            fidl::encoding::FrameworkErr::UnknownMethod,
4259                            header.tx_id,
4260                            header.ordinal,
4261                            header.dynamic_flags(),
4262                            (bytes, handles),
4263                        )?;
4264                        Ok(BufferCollectionRequest::_UnknownMethod {
4265                            ordinal: header.ordinal,
4266                            control_handle: BufferCollectionControlHandle {
4267                                inner: this.inner.clone(),
4268                            },
4269                            method_type: fidl::MethodType::TwoWay,
4270                        })
4271                    }
4272                    _ => Err(fidl::Error::UnknownOrdinal {
4273                        ordinal: header.ordinal,
4274                        protocol_name:
4275                            <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
4276                    }),
4277                }))
4278            },
4279        )
4280    }
4281}
4282
4283/// [`fuchsia.sysmem2/BufferCollection`] is a connection directly from a
4284/// participant to sysmem re. a buffer collection; often the buffer collection
4285/// is shared with other participants which have their own `BufferCollection`
4286/// client end(s) associated with the same buffer collection.  In other words,
4287/// an instance of the `BufferCollection` interface is a view of a buffer
4288/// collection, not the buffer collection itself.
4289///
4290/// The `BufferCollection` connection exists to facilitate async indication of
4291/// when the buffer collection has been populated with buffers.
4292///
4293/// Also, the channel's closure by the sysmem server is an indication to the
4294/// client that the client should close all VMO handles that were obtained from
4295/// the `BufferCollection` ASAP.
4296///
4297/// Some buffer collections can use enough memory that it can be worth avoiding
4298/// allocation overlap (in time) using
4299/// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] so that the
4300/// initiator can tell when enough buffers of the buffer collection have been
4301/// fully deallocated prior to the initiator allocating a new buffer collection.
4302///
4303/// Epitaphs are not used in this protocol.
4304#[derive(Debug)]
4305pub enum BufferCollectionRequest {
4306    /// Ensure that previous messages have been received server side. This is
4307    /// particularly useful after previous messages that created new tokens,
4308    /// because a token must be known to the sysmem server before sending the
4309    /// token to another participant.
4310    ///
4311    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
4312    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
4313    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
4314    /// to mitigate the possibility of a hostile/fake
4315    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
4316    /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
4318    /// the token as part of exchanging it for a
4319    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
4320    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
4321    /// of stalling.
4322    ///
4323    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
4324    /// and then starting and completing a `Sync`, it's then safe to send the
4325    /// `BufferCollectionToken` client ends to other participants knowing the
4326    /// server will recognize the tokens when they're sent by the other
4327    /// participants to sysmem in a
4328    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
4329    /// efficient way to create tokens while avoiding unnecessary round trips.
4330    ///
4331    /// Other options include waiting for each
4332    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
4333    /// individually (using separate call to `Sync` after each), or calling
4334    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
4335    /// converted to a `BufferCollection` via
4336    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
4337    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
4338    /// the sync step and can create multiple tokens at once.
4339    Sync { responder: BufferCollectionSyncResponder },
4340    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
4341    ///
4342    /// Normally a participant will convert a `BufferCollectionToken` into a
4343    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
4344    /// `Release` via the token (and then close the channel immediately or
4345    /// shortly later in response to server closing the server end), which
4346    /// avoids causing buffer collection failure. Without a prior `Release`,
4347    /// closing the `BufferCollectionToken` client end will cause buffer
4348    /// collection failure.
4349    ///
4350    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
4351    ///
4352    /// By default the server handles unexpected closure of a
4353    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
4354    /// first) by failing the buffer collection. Partly this is to expedite
4355    /// closing VMO handles to reclaim memory when any participant fails. If a
4356    /// participant would like to cleanly close a `BufferCollection` without
4357    /// causing buffer collection failure, the participant can send `Release`
4358    /// before closing the `BufferCollection` client end. The `Release` can
4359    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
4360    /// buffer collection won't require constraints from this node in order to
4361    /// allocate. If after `SetConstraints`, the constraints are retained and
4362    /// aggregated, despite the lack of `BufferCollection` connection at the
4363    /// time of constraints aggregation.
4364    ///
4365    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
4366    ///
4367    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
4368    /// end (without `Release` first) will trigger failure of the buffer
4369    /// collection. To close a `BufferCollectionTokenGroup` channel without
4370    /// failing the buffer collection, ensure that AllChildrenPresent() has been
4371    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
4372    /// client end.
4373    ///
4374    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
4376    /// buffer collection will fail (triggered by reception of `Release` without
4377    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
4378    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
4379    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
4380    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
4381    /// close requires `AllChildrenPresent` (if not already sent), then
4382    /// `Release`, then close client end.
4383    ///
4384    /// If `Release` occurs after `AllChildrenPresent`, the children and all
4385    /// their constraints remain intact (just as they would if the
4386    /// `BufferCollectionTokenGroup` channel had remained open), and the client
4387    /// end close doesn't trigger buffer collection failure.
4388    ///
4389    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
4390    ///
4391    /// For brevity, the per-channel-protocol paragraphs above ignore the
4392    /// separate failure domain created by
4393    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
4394    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
4395    /// unexpectedly closes (without `Release` first) and that client end is
4396    /// under a failure domain, instead of failing the whole buffer collection,
4397    /// the failure domain is failed, but the buffer collection itself is
4398    /// isolated from failure of the failure domain. Such failure domains can be
4399    /// nested, in which case only the inner-most failure domain in which the
4400    /// `Node` resides fails.
4401    Release { control_handle: BufferCollectionControlHandle },
4402    /// Set a name for VMOs in this buffer collection.
4403    ///
4404    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
4405    /// will be truncated to fit. The name of the vmo will be suffixed with the
4406    /// buffer index within the collection (if the suffix fits within
4407    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
4408    /// listed in the inspect data.
4409    ///
4410    /// The name only affects VMOs allocated after the name is set; this call
4411    /// does not rename existing VMOs. If multiple clients set different names
4412    /// then the larger priority value will win. Setting a new name with the
4413    /// same priority as a prior name doesn't change the name.
4414    ///
4415    /// All table fields are currently required.
4416    ///
4417    /// + request `priority` The name is only set if this is the first `SetName`
4418    ///   or if `priority` is greater than any previous `priority` value in
4419    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
4420    /// + request `name` The name for VMOs created under this buffer collection.
4421    SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionControlHandle },
4422    /// Set information about the current client that can be used by sysmem to
4423    /// help diagnose leaking memory and allocation stalls waiting for a
4424    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
4425    ///
4426    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
4428    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
4429    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
4430    ///
4431    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
4432    /// `Allocator` is the most efficient way to ensure that all
4433    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
4434    /// set, and is also more efficient than separately sending the same debug
4435    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
4436    /// created [`fuchsia.sysmem2/Node`].
4437    ///
4438    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
4439    /// indicate which client is closing their channel first, leading to subtree
4440    /// failure (which can be normal if the purpose of the subtree is over, but
4441    /// if happening earlier than expected, the client-channel-specific name can
4442    /// help diagnose where the failure is first coming from, from sysmem's
4443    /// point of view).
4444    ///
4445    /// All table fields are currently required.
4446    ///
4447    /// + request `name` This can be an arbitrary string, but the current
4448    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
4449    /// + request `id` This can be an arbitrary id, but the current process ID
4450    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
4451    SetDebugClientInfo {
4452        payload: NodeSetDebugClientInfoRequest,
4453        control_handle: BufferCollectionControlHandle,
4454    },
4455    /// Sysmem logs a warning if sysmem hasn't seen
4456    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
4457    /// within 5 seconds after creation of a new collection.
4458    ///
4459    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
4461    /// take effect.
4462    ///
4463    /// In most cases the default works well.
4464    ///
4465    /// All table fields are currently required.
4466    ///
4467    /// + request `deadline` The time at which sysmem will start trying to log
4468    ///   the warning, unless all constraints are with sysmem by then.
4469    SetDebugTimeoutLogDeadline {
4470        payload: NodeSetDebugTimeoutLogDeadlineRequest,
4471        control_handle: BufferCollectionControlHandle,
4472    },
4473    /// This enables verbose logging for the buffer collection.
4474    ///
4475    /// Verbose logging includes constraints set via
4476    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
4477    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
4478    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
4479    /// the tree of `Node`(s).
4480    ///
4481    /// Normally sysmem prints only a single line complaint when aggregation
4482    /// fails, with just the specific detailed reason that aggregation failed,
4483    /// with little surrounding context.  While this is often enough to diagnose
4484    /// a problem if only a small change was made and everything was working
4485    /// before the small change, it's often not particularly helpful for getting
4486    /// a new buffer collection to work for the first time.  Especially with
4487    /// more complex trees of nodes, involving things like
4488    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
4489    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
4490    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
4491    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
4492    /// looks like and why it's failing a logical allocation, or why a tree or
4493    /// subtree is failing sooner than expected.
4494    ///
4495    /// The intent of the extra logging is to be acceptable from a performance
4496    /// point of view, under the assumption that verbose logging is only enabled
4497    /// on a low number of buffer collections. If we're not tracking down a bug,
4498    /// we shouldn't send this message.
4499    SetVerboseLogging { control_handle: BufferCollectionControlHandle },
4500    /// This gets a handle that can be used as a parameter to
4501    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
4502    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
4503    /// client obtained this handle from this `Node`.
4504    ///
4505    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
4506    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
4507    /// despite the two calls typically being on different channels.
4508    ///
4509    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
4510    ///
4511    /// All table fields are currently required.
4512    ///
4513    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
4514    ///   different `Node` channel, to prove that the client obtained the handle
4515    ///   from this `Node`.
4516    GetNodeRef { responder: BufferCollectionGetNodeRefResponder },
4517    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
4518    /// rooted at a different child token of a common parent
4519    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
4520    /// passed-in `node_ref`.
4521    ///
4522    /// This call is for assisting with admission control de-duplication, and
4523    /// with debugging.
4524    ///
4525    /// The `node_ref` must be obtained using
4526    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
4527    ///
4528    /// The `node_ref` can be a duplicated handle; it's not necessary to call
4529    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
4530    ///
4531    /// If a calling token may not actually be a valid token at all due to a
4532    /// potentially hostile/untrusted provider of the token, call
4533    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
4534    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
4535    /// never responds due to a calling token not being a real token (not really
4536    /// talking to sysmem).  Another option is to call
4537    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
4538    /// which also validates the token along with converting it to a
4539    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
4540    ///
4541    /// All table fields are currently required.
4542    ///
4543    /// - response `is_alternate`
4544    ///   - true: The first parent node in common between the calling node and
4545    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
4546    ///     that the calling `Node` and the `node_ref` `Node` will not have both
4547    ///     their constraints apply - rather sysmem will choose one or the other
4548    ///     of the constraints - never both.  This is because only one child of
4549    ///     a `BufferCollectionTokenGroup` is selected during logical
4550    ///     allocation, with only that one child's subtree contributing to
4551    ///     constraints aggregation.
4552    ///   - false: The first parent node in common between the calling `Node`
4553    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
4554    ///     Currently, this means the first parent node in common is a
4555    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
4556    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
4557    ///     `Node` may have both their constraints apply during constraints
4558    ///     aggregation of the logical allocation, if both `Node`(s) are
4559    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
4560    ///     this case, there is no `BufferCollectionTokenGroup` that will
4561    ///     directly prevent the two `Node`(s) from both being selected and
4562    ///     their constraints both aggregated, but even when false, one or both
4563    ///     `Node`(s) may still be eliminated from consideration if one or both
4564    ///     `Node`(s) has a direct or indirect parent
4565    ///     `BufferCollectionTokenGroup` which selects a child subtree other
4566    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
4567    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
4568    ///   associated with the same buffer collection as the calling `Node`.
4569    ///   Another reason for this error is if the `node_ref` is an
4570    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
4571    ///   a real `node_ref` obtained from `GetNodeRef`.
4572    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
4574    ///   the needed rights expected on a real `node_ref`.
4575    /// * No other failing status codes are returned by this call.  However,
4576    ///   sysmem may add additional codes in future, so the client should have
4577    ///   sensible default handling for any failing status code.
4578    IsAlternateFor {
4579        payload: NodeIsAlternateForRequest,
4580        responder: BufferCollectionIsAlternateForResponder,
4581    },
4582    /// Get the buffer collection ID. This ID is also available from
4583    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
4584    /// within the collection).
4585    ///
4586    /// This call is mainly useful in situations where we can't convey a
4587    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
4588    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
4589    /// handle, which can be joined back up with a `BufferCollection` client end
4590    /// that was created via a different path. Prefer to convey a
4591    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
4592    ///
4593    /// Trusting a `buffer_collection_id` value from a source other than sysmem
4594    /// is analogous to trusting a koid value from a source other than zircon.
4595    /// Both should be avoided unless really necessary, and both require
4596    /// caution. In some situations it may be reasonable to refer to a
4597    /// pre-established `BufferCollection` by `buffer_collection_id` via a
4598    /// protocol for efficiency reasons, but an incoming value purporting to be
4599    /// a `buffer_collection_id` is not sufficient alone to justify granting the
4600    /// sender of the `buffer_collection_id` any capability. The sender must
4601    /// first prove to a receiver that the sender has/had a VMO or has/had a
4602    /// `BufferCollectionToken` to the same collection by sending a handle that
4603    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
4604    /// `buffer_collection_id` value. The receiver should take care to avoid
4605    /// assuming that a sender had a `BufferCollectionToken` in cases where the
4606    /// sender has only proven that the sender had a VMO.
4607    ///
4608    /// - response `buffer_collection_id` This ID is unique per buffer
4609    ///   collection per boot. Each buffer is uniquely identified by the
4610    ///   `buffer_collection_id` and `buffer_index` together.
4611    GetBufferCollectionId { responder: BufferCollectionGetBufferCollectionIdResponder },
4612    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
4613    /// created after this message to weak, which means that a client's `Node`
4614    /// client end (or a child created after this message) is not alone
4615    /// sufficient to keep allocated VMOs alive.
4616    ///
4617    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
4618    /// `close_weak_asap`.
4619    ///
4620    /// This message is only permitted before the `Node` becomes ready for
4621    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
4622    ///   * `BufferCollectionToken`: any time
4623    ///   * `BufferCollection`: before `SetConstraints`
4624    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
4625    ///
4626    /// Currently, no conversion from strong `Node` to weak `Node` after ready
4627    /// for allocation is provided, but a client can simulate that by creating
4628    /// an additional `Node` before allocation and setting that additional
4629    /// `Node` to weak, and then potentially at some point later sending
4630    /// `Release` and closing the client end of the client's strong `Node`, but
4631    /// keeping the client's weak `Node`.
4632    ///
4633    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
4634    /// collection failure (all `Node` client end(s) will see
4635    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
4636    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
4637    /// this situation until all `Node`(s) are ready for allocation. For initial
4638    /// allocation to succeed, at least one strong `Node` is required to exist
4639    /// at allocation time, but after that client receives VMO handles, that
4640    /// client can `BufferCollection.Release` and close the client end without
4641    /// causing this type of failure.
4642    ///
4643    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
4644    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
4645    /// separately as appropriate.
4646    SetWeak { control_handle: BufferCollectionControlHandle },
4647    /// This indicates to sysmem that the client is prepared to pay attention to
4648    /// `close_weak_asap`.
4649    ///
4650    /// If sent, this message must be before
4651    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
4652    ///
4653    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
4654    /// send this message before `WaitForAllBuffersAllocated`, or a parent
4655    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
4656    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
4657    /// trigger buffer collection failure.
4658    ///
4659    /// This message is necessary because weak sysmem VMOs have not always been
4660    /// a thing, so older clients are not aware of the need to pay attention to
4661    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
4662    /// sysmem weak VMO handles asap. By having this message and requiring
4663    /// participants to indicate their acceptance of this aspect of the overall
4664    /// protocol, we avoid situations where an older client is delivered a weak
4665    /// VMO without any way for sysmem to get that VMO to close quickly later
4666    /// (and on a per-buffer basis).
4667    ///
4668    /// A participant that doesn't handle `close_weak_asap` and also doesn't
4669    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
4670    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
4671    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
4672    /// same participant has a child/delegate which does retrieve VMOs, that
4673    /// child/delegate will need to send `SetWeakOk` before
4674    /// `WaitForAllBuffersAllocated`.
4675    ///
4676    /// + request `for_child_nodes_also` If present and true, this means direct
4677    ///   child nodes of this node created after this message plus all
4678    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
4679    ///   those nodes. Any child node of this node that was created before this
4680    ///   message is not included. This setting is "sticky" in the sense that a
4681    ///   subsequent `SetWeakOk` without this bool set to true does not reset
4682    ///   the server-side bool. If this creates a problem for a participant, a
4683    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
4684    ///   tokens instead, as appropriate. A participant should only set
4685    ///   `for_child_nodes_also` true if the participant can really promise to
4686    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
4687    ///   weak VMO handles held by participants holding the corresponding child
4688    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
4689    ///   which are using sysmem(1) can be weak, despite the clients of those
4690    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
4691    ///   direct way to find out about `close_weak_asap`. This only applies to
4692    ///   descendents of this `Node` which are using sysmem(1), not to this
4693    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
4694    ///   token, which will fail allocation unless an ancestor of this `Node`
4695    ///   specified `for_child_nodes_also` true.
4696    SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionControlHandle },
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
4699    /// reservation by a different `Node` via
4700    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
4701    ///
4702    /// The `Node` buffer counts may not be released until the entire tree of
4703    /// `Node`(s) is closed or failed, because
4704    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
4705    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
4706    /// `Node` buffer counts remain reserved until the orphaned node is later
4707    /// cleaned up.
4708    ///
4709    /// If the `Node` exceeds a fairly large number of attached eventpair server
4710    /// ends, a log message will indicate this and the `Node` (and the
4711    /// appropriate) sub-tree will fail.
4712    ///
4713    /// The `server_end` will remain open when
4714    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
4715    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
4716    /// [`fuchsia.sysmem2/BufferCollection`].
4717    ///
4718    /// This message can also be used with a
4719    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
4720    AttachNodeTracking {
4721        payload: NodeAttachNodeTrackingRequest,
4722        control_handle: BufferCollectionControlHandle,
4723    },
4724    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
4725    /// collection.
4726    ///
4727    /// A participant may only call
4728    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
4729    /// [`fuchsia.sysmem2/BufferCollection`].
4730    ///
4731    /// For buffer allocation to be attempted, all holders of a
4732    /// `BufferCollection` client end need to call `SetConstraints` before
4733    /// sysmem will attempt to allocate buffers.
4734    ///
4735    /// + request `constraints` These are the constraints on the buffer
4736    ///   collection imposed by the sending client/participant.  The
4737    ///   `constraints` field is not required to be set. If not set, the client
4738    ///   is not setting any actual constraints, but is indicating that the
4739    ///   client has no constraints to set. A client that doesn't set the
4740    ///   `constraints` field won't receive any VMO handles, but can still find
4741    ///   out how many buffers were allocated and can still refer to buffers by
4742    ///   their `buffer_index`.
4743    SetConstraints {
4744        payload: BufferCollectionSetConstraintsRequest,
4745        control_handle: BufferCollectionControlHandle,
4746    },
4747    /// Wait until all buffers are allocated.
4748    ///
4749    /// This FIDL call completes when buffers have been allocated, or completes
4750    /// with some failure detail if allocation has been attempted but failed.
4751    ///
4752    /// The following must occur before buffers will be allocated:
4753    ///   * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
4754    ///     collection must be turned in via `BindSharedCollection` to get a
4755    ///     [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
4756    ///     [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
4757    ///     or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
4758    ///     to them.
4759    ///   * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
4760    ///     must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
4761    ///     sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
4762    ///     sent to them.
4763    ///
4764    /// - result `buffer_collection_info` The VMO handles and other related
4765    ///   info.
4766    /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
4767    ///   cannot be fulfilled due to resource exhaustion.
    /// * error [`fuchsia.sysmem2/Error.PROTOCOL_DEVIATION`] The request is
4769    ///   malformed.
    /// * error [`fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY`] The
4771    ///   request is valid but cannot be satisfied, perhaps due to hardware
4772    ///   limitations. This can happen if participants have incompatible
4773    ///   constraints (empty intersection, roughly speaking). See the log for
4774    ///   more info. In cases where a participant could potentially be treated
4775    ///   as optional, see [`BufferCollectionTokenGroup`]. When using
4776    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
4777    ///   error code if there aren't enough buffers in the pre-existing
4778    ///   collection to satisfy the constraints set on the attached token and
4779    ///   any sub-tree of tokens derived from the attached token.
4780    WaitForAllBuffersAllocated { responder: BufferCollectionWaitForAllBuffersAllocatedResponder },
4781    /// Checks whether all the buffers have been allocated, in a polling
4782    /// fashion.
4783    ///
4784    /// * If the buffer collection has been allocated, returns success.
4785    /// * If the buffer collection failed allocation, returns the same
4786    ///   [`fuchsia.sysmem2/Error`] as
4787    ///   [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
4788    ///   return.
4789    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
4790    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
4791    ///   would not respond quickly.
4792    CheckAllBuffersAllocated { responder: BufferCollectionCheckAllBuffersAllocatedResponder },
4793    /// Create a new token to add a new participant to an existing logical
4794    /// buffer collection, if the existing collection's buffer counts,
4795    /// constraints, and participants allow.
4796    ///
4797    /// This can be useful in replacing a failed participant, and/or in
4798    /// adding/re-adding a participant after buffers have already been
4799    /// allocated.
4800    ///
4801    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
4802    /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
4803    /// goes through the normal procedure of setting constraints or closing
4804    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
4805    /// clients' point of view, despite the possibility that all the buffers
4806    /// were actually allocated previously. This process is called "logical
4807    /// allocation". Most instances of "allocation" in docs for other messages
4808    /// can also be read as "allocation or logical allocation" while remaining
4809    /// valid, but we just say "allocation" in most places for brevity/clarity
4810    /// of explanation, with the details of "logical allocation" left for the
4811    /// docs here on `AttachToken`.
4812    ///
4813    /// Failure of an attached `Node` does not propagate to the parent of the
4814    /// attached `Node`. More generally, failure of a child `Node` is blocked
4815    /// from reaching its parent `Node` if the child is attached, or if the
4816    /// child is dispensable and the failure occurred after logical allocation
4817    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
4818    ///
4819    /// A participant may in some scenarios choose to initially use a
4820    /// dispensable token for a given instance of a delegate participant, and
4821    /// then later if the first instance of that delegate participant fails, a
4822    /// new second instance of that delegate participant my be given a token
4823    /// created with `AttachToken`.
4824    ///
4825    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
4826    /// client end, the token acts like any other token. The client can
4827    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
4828    /// and can send the token to a different process/participant. The
4829    /// `BufferCollectionToken` `Node` should be converted to a
4830    /// `BufferCollection` `Node` as normal by sending
4831    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
4832    /// without causing subtree failure by sending
4833    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
4834    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
4835    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
4836    /// the `BufferCollection`.
4837    ///
4838    /// Within the subtree, a success result from
4839    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
4840    /// the subtree participants' constraints were satisfiable using the
4841    /// already-existing buffer collection, the already-established
4842    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
4843    /// constraints, and the already-existing other participants (already added
4844    /// via successful logical allocation) and their specified buffer counts in
4845    /// their constraints. A failure result means the new participants'
4846    /// constraints cannot be satisfied using the existing buffer collection and
4847    /// its already-added participants. Creating a new collection instead may
4848    /// allow all participants' constraints to be satisfied, assuming
4849    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
4850    /// used.
4851    ///
4852    /// A token created with `AttachToken` performs constraints aggregation with
4853    /// all constraints currently in effect on the buffer collection, plus the
4854    /// attached token under consideration plus child tokens under the attached
4855    /// token which are not themselves an attached token or under such a token.
4856    /// Further subtrees under this subtree are considered for logical
4857    /// allocation only after this subtree has completed logical allocation.
4858    ///
4859    /// Assignment of existing buffers to participants'
4860    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
4861    /// etc is first-come first-served, but a child can't logically allocate
4862    /// before all its parents have sent `SetConstraints`.
4863    ///
4864    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
4865    /// in contrast to `AttachToken`, has the created token `Node` + child
4866    /// `Node`(s) (in the created subtree but not in any subtree under this
4867    /// subtree) participate in constraints aggregation along with its parent
4868    /// during the parent's allocation or logical allocation.
4869    ///
4870    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
4871    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
4872    /// sysmem before the new token can be passed to `BindSharedCollection`. The
4873    /// `Sync` of the new token can be accomplished with
4874    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
4875    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
4876    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
4877    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
4878    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
4879    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
4880    /// created token, to also sync those additional tokens to sysmem using a
4881    /// single round-trip.
4882    ///
4883    /// All table fields are currently required.
4884    ///
    /// + request `rights_attenuation_mask` This allows attenuating the VMO
4886    ///   rights of the subtree. These values for `rights_attenuation_mask`
4887    ///   result in no attenuation (note that 0 is not on this list):
4888    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
4889    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
4890    /// + request `token_request` The server end of the `BufferCollectionToken`
4891    ///   channel. The client retains the client end.
4892    AttachToken {
4893        payload: BufferCollectionAttachTokenRequest,
4894        control_handle: BufferCollectionControlHandle,
4895    },
4896    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
4897    /// buffers have been allocated and only the specified number of buffers (or
4898    /// fewer) remain in the buffer collection.
4899    ///
4900    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
4901    /// client to wait until an old buffer collection is fully or mostly
4902    /// deallocated before attempting allocation of a new buffer collection. The
4903    /// eventpair is only signalled when the buffers of this collection have
4904    /// been fully deallocated (not just un-referenced by clients, but all the
4905    /// memory consumed by those buffers has been fully reclaimed/recycled), or
4906    /// when allocation or logical allocation fails for the tree or subtree
4907    /// including this [`fuchsia.sysmem2/BufferCollection`].
4908    ///
4909    /// The eventpair won't be signalled until allocation or logical allocation
4910    /// has completed; until then, the collection's current buffer count is
4911    /// ignored.
4912    ///
4913    /// If logical allocation fails for an attached subtree (using
4914    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
4915    /// eventpair will close during that failure regardless of the number of
    /// buffers potentially allocated in the overall buffer collection. This is
4917    /// for logical allocation consistency with normal allocation.
4918    ///
4919    /// The lifetime signalled by this event includes asynchronous cleanup of
4920    /// allocated buffers, and this asynchronous cleanup cannot occur until all
4921    /// holders of VMO handles to the buffers have closed those VMO handles.
4922    /// Therefore, clients should take care not to become blocked forever
4923    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
4924    /// participants using the logical buffer collection (including the waiter
4925    /// itself) are less trusted, less reliable, or potentially blocked by the
4926    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
4927    /// for the client wait may be prudent, depending on details of how the
4928    /// collection and/or its VMOs are used or shared. Failure to allocate a
4929    /// new/replacement buffer collection is better than getting stuck forever.
4930    ///
4931    /// The sysmem server itself intentionally does not perform any waiting on
4932    /// already-failed collections' VMOs to finish cleaning up before attempting
4933    /// a new allocation, and the sysmem server intentionally doesn't retry
4934    /// allocation if a new allocation fails due to out of memory, even if that
4935    /// failure is potentially due to continued existence of an old collection's
4936    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
4937    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
4938    /// as long as the waiting client is careful to not create a deadlock.
4939    ///
4940    /// Continued existence of old collections that are still cleaning up is not
4941    /// the only reason that a new allocation may fail due to insufficient
4942    /// memory, even if the new allocation is allocating physically contiguous
4943    /// buffers. Overall system memory pressure can also be the cause of failure
4944    /// to allocate a new collection. See also
4945    /// [`fuchsia.memorypressure/Provider`].
4946    ///
4947    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
4948    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
4949    /// `eventpair` handle (server end) can be sent via more than one
4950    /// `AttachLifetimeTracking` message to different protocols, and the
4951    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
4952    /// the conditions are met (all holders of duplicates have closed their
    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
4954    /// client end can (also) be duplicated without preventing the
4955    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
4956    ///
4957    /// The server intentionally doesn't "trust" any signals set on the
4958    /// `server_end`. This mechanism intentionally uses only
4959    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
4960    /// "early", and is only set when all handles to the server end eventpair
4961    /// are closed. No meaning is associated with any of the other signals, and
4962    /// clients should ignore any other signal bits on either end of the
4963    /// `eventpair`.
4964    ///
4965    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
4966    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
4967    /// transfer without causing `BufferCollection` channel failure).
4968    ///
4969    /// All table fields are currently required.
4970    ///
4971    /// + request `server_end` This eventpair handle will be closed by the
4972    ///   sysmem server when buffers have been allocated initially and the
4973    ///   number of buffers is then less than or equal to `buffers_remaining`.
4974    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
4975    ///   fewer) buffers to be fully deallocated. A number greater than zero can
4976    ///   be useful in situations where a known number of buffers are
4977    ///   intentionally not closed so that the data can continue to be used,
4978    ///   such as for keeping the last available video frame displayed in the UI
4979    ///   even if the video stream was using protected output buffers. It's
4980    ///   outside the scope of the `BufferCollection` interface (at least for
4981    ///   now) to determine how many buffers may be held without closing, but
4982    ///   it'll typically be in the range 0-2.
4983    AttachLifetimeTracking {
4984        payload: BufferCollectionAttachLifetimeTrackingRequest,
4985        control_handle: BufferCollectionControlHandle,
4986    },
4987    /// An interaction was received which does not match any known method.
4988    #[non_exhaustive]
4989    _UnknownMethod {
4990        /// Ordinal of the method that was called.
4991        ordinal: u64,
4992        control_handle: BufferCollectionControlHandle,
4993        method_type: fidl::MethodType,
4994    },
4995}
4996
4997impl BufferCollectionRequest {
4998    #[allow(irrefutable_let_patterns)]
4999    pub fn into_sync(self) -> Option<(BufferCollectionSyncResponder)> {
5000        if let BufferCollectionRequest::Sync { responder } = self {
5001            Some((responder))
5002        } else {
5003            None
5004        }
5005    }
5006
5007    #[allow(irrefutable_let_patterns)]
5008    pub fn into_release(self) -> Option<(BufferCollectionControlHandle)> {
5009        if let BufferCollectionRequest::Release { control_handle } = self {
5010            Some((control_handle))
5011        } else {
5012            None
5013        }
5014    }
5015
5016    #[allow(irrefutable_let_patterns)]
5017    pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionControlHandle)> {
5018        if let BufferCollectionRequest::SetName { payload, control_handle } = self {
5019            Some((payload, control_handle))
5020        } else {
5021            None
5022        }
5023    }
5024
5025    #[allow(irrefutable_let_patterns)]
5026    pub fn into_set_debug_client_info(
5027        self,
5028    ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionControlHandle)> {
5029        if let BufferCollectionRequest::SetDebugClientInfo { payload, control_handle } = self {
5030            Some((payload, control_handle))
5031        } else {
5032            None
5033        }
5034    }
5035
5036    #[allow(irrefutable_let_patterns)]
5037    pub fn into_set_debug_timeout_log_deadline(
5038        self,
5039    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionControlHandle)> {
5040        if let BufferCollectionRequest::SetDebugTimeoutLogDeadline { payload, control_handle } =
5041            self
5042        {
5043            Some((payload, control_handle))
5044        } else {
5045            None
5046        }
5047    }
5048
5049    #[allow(irrefutable_let_patterns)]
5050    pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionControlHandle)> {
5051        if let BufferCollectionRequest::SetVerboseLogging { control_handle } = self {
5052            Some((control_handle))
5053        } else {
5054            None
5055        }
5056    }
5057
5058    #[allow(irrefutable_let_patterns)]
5059    pub fn into_get_node_ref(self) -> Option<(BufferCollectionGetNodeRefResponder)> {
5060        if let BufferCollectionRequest::GetNodeRef { responder } = self {
5061            Some((responder))
5062        } else {
5063            None
5064        }
5065    }
5066
5067    #[allow(irrefutable_let_patterns)]
5068    pub fn into_is_alternate_for(
5069        self,
5070    ) -> Option<(NodeIsAlternateForRequest, BufferCollectionIsAlternateForResponder)> {
5071        if let BufferCollectionRequest::IsAlternateFor { payload, responder } = self {
5072            Some((payload, responder))
5073        } else {
5074            None
5075        }
5076    }
5077
5078    #[allow(irrefutable_let_patterns)]
5079    pub fn into_get_buffer_collection_id(
5080        self,
5081    ) -> Option<(BufferCollectionGetBufferCollectionIdResponder)> {
5082        if let BufferCollectionRequest::GetBufferCollectionId { responder } = self {
5083            Some((responder))
5084        } else {
5085            None
5086        }
5087    }
5088
5089    #[allow(irrefutable_let_patterns)]
5090    pub fn into_set_weak(self) -> Option<(BufferCollectionControlHandle)> {
5091        if let BufferCollectionRequest::SetWeak { control_handle } = self {
5092            Some((control_handle))
5093        } else {
5094            None
5095        }
5096    }
5097
5098    #[allow(irrefutable_let_patterns)]
5099    pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, BufferCollectionControlHandle)> {
5100        if let BufferCollectionRequest::SetWeakOk { payload, control_handle } = self {
5101            Some((payload, control_handle))
5102        } else {
5103            None
5104        }
5105    }
5106
5107    #[allow(irrefutable_let_patterns)]
5108    pub fn into_attach_node_tracking(
5109        self,
5110    ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionControlHandle)> {
5111        if let BufferCollectionRequest::AttachNodeTracking { payload, control_handle } = self {
5112            Some((payload, control_handle))
5113        } else {
5114            None
5115        }
5116    }
5117
5118    #[allow(irrefutable_let_patterns)]
5119    pub fn into_set_constraints(
5120        self,
5121    ) -> Option<(BufferCollectionSetConstraintsRequest, BufferCollectionControlHandle)> {
5122        if let BufferCollectionRequest::SetConstraints { payload, control_handle } = self {
5123            Some((payload, control_handle))
5124        } else {
5125            None
5126        }
5127    }
5128
5129    #[allow(irrefutable_let_patterns)]
5130    pub fn into_wait_for_all_buffers_allocated(
5131        self,
5132    ) -> Option<(BufferCollectionWaitForAllBuffersAllocatedResponder)> {
5133        if let BufferCollectionRequest::WaitForAllBuffersAllocated { responder } = self {
5134            Some((responder))
5135        } else {
5136            None
5137        }
5138    }
5139
5140    #[allow(irrefutable_let_patterns)]
5141    pub fn into_check_all_buffers_allocated(
5142        self,
5143    ) -> Option<(BufferCollectionCheckAllBuffersAllocatedResponder)> {
5144        if let BufferCollectionRequest::CheckAllBuffersAllocated { responder } = self {
5145            Some((responder))
5146        } else {
5147            None
5148        }
5149    }
5150
5151    #[allow(irrefutable_let_patterns)]
5152    pub fn into_attach_token(
5153        self,
5154    ) -> Option<(BufferCollectionAttachTokenRequest, BufferCollectionControlHandle)> {
5155        if let BufferCollectionRequest::AttachToken { payload, control_handle } = self {
5156            Some((payload, control_handle))
5157        } else {
5158            None
5159        }
5160    }
5161
5162    #[allow(irrefutable_let_patterns)]
5163    pub fn into_attach_lifetime_tracking(
5164        self,
5165    ) -> Option<(BufferCollectionAttachLifetimeTrackingRequest, BufferCollectionControlHandle)>
5166    {
5167        if let BufferCollectionRequest::AttachLifetimeTracking { payload, control_handle } = self {
5168            Some((payload, control_handle))
5169        } else {
5170            None
5171        }
5172    }
5173
5174    /// Name of the method defined in FIDL
5175    pub fn method_name(&self) -> &'static str {
5176        match *self {
5177            BufferCollectionRequest::Sync { .. } => "sync",
5178            BufferCollectionRequest::Release { .. } => "release",
5179            BufferCollectionRequest::SetName { .. } => "set_name",
5180            BufferCollectionRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
5181            BufferCollectionRequest::SetDebugTimeoutLogDeadline { .. } => {
5182                "set_debug_timeout_log_deadline"
5183            }
5184            BufferCollectionRequest::SetVerboseLogging { .. } => "set_verbose_logging",
5185            BufferCollectionRequest::GetNodeRef { .. } => "get_node_ref",
5186            BufferCollectionRequest::IsAlternateFor { .. } => "is_alternate_for",
5187            BufferCollectionRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
5188            BufferCollectionRequest::SetWeak { .. } => "set_weak",
5189            BufferCollectionRequest::SetWeakOk { .. } => "set_weak_ok",
5190            BufferCollectionRequest::AttachNodeTracking { .. } => "attach_node_tracking",
5191            BufferCollectionRequest::SetConstraints { .. } => "set_constraints",
5192            BufferCollectionRequest::WaitForAllBuffersAllocated { .. } => {
5193                "wait_for_all_buffers_allocated"
5194            }
5195            BufferCollectionRequest::CheckAllBuffersAllocated { .. } => {
5196                "check_all_buffers_allocated"
5197            }
5198            BufferCollectionRequest::AttachToken { .. } => "attach_token",
5199            BufferCollectionRequest::AttachLifetimeTracking { .. } => "attach_lifetime_tracking",
5200            BufferCollectionRequest::_UnknownMethod {
5201                method_type: fidl::MethodType::OneWay,
5202                ..
5203            } => "unknown one-way method",
5204            BufferCollectionRequest::_UnknownMethod {
5205                method_type: fidl::MethodType::TwoWay,
5206                ..
5207            } => "unknown two-way method",
5208        }
5209    }
5210}
5211
/// Control handle for a `BufferCollection` server connection.
///
/// Cheap to clone (wraps an `Arc`); used by the responders in this file and by
/// server code to manage the underlying channel (shutdown, closed-state
/// queries, peer signaling).
#[derive(Debug, Clone)]
pub struct BufferCollectionControlHandle {
    /// Shared server-side connection state; the `Arc` makes this handle cheap
    /// to clone and lets responders access the channel via `inner.send(...)`.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
5216
/// All methods delegate to the shared `ServeInner` connection state.
impl fidl::endpoints::ControlHandle for BufferCollectionControlHandle {
    /// Shuts down the server connection.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    /// Shuts down the server connection, sending `status` to the client as an
    /// epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    /// Reports whether the underlying channel is closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    /// Returns a signals handle that can be awaited for channel closure.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    /// Clears/sets signals on the peer end of the channel (Fuchsia targets
    /// only, since it requires a real zircon channel).
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
5243
// NOTE(review): intentionally empty — the generator emitted no
// protocol-specific inherent methods (e.g. event senders) for this handle.
impl BufferCollectionControlHandle {}
5245
/// Responder for the two-way `BufferCollection.Sync` method.
///
/// Must be consumed via `send` (or `drop_without_shutdown`); dropping it
/// without responding shuts the channel down (see the `Drop` impl).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionSyncResponder {
    // Wrapped in `ManuallyDrop` so `Drop` and `drop_without_shutdown` can each
    // release the handle exactly once.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request; passed back when sending the response.
    tx_id: u32,
}
5252
/// Sets the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionSyncResponder {
    fn drop(&mut self) {
        // Shut down first so a waiting client observes closure rather than
        // hanging on a response that will never arrive.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5263
impl fidl::endpoints::Responder for BufferCollectionSyncResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Returns the control handle for this responder's connection.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel; no response
    /// is sent for this transaction.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5278
5279impl BufferCollectionSyncResponder {
5280    /// Sends a response to the FIDL transaction.
5281    ///
5282    /// Sets the channel to shutdown if an error occurs.
5283    pub fn send(self) -> Result<(), fidl::Error> {
5284        let _result = self.send_raw();
5285        if _result.is_err() {
5286            self.control_handle.shutdown();
5287        }
5288        self.drop_without_shutdown();
5289        _result
5290    }
5291
5292    /// Similar to "send" but does not shutdown the channel if an error occurs.
5293    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
5294        let _result = self.send_raw();
5295        self.drop_without_shutdown();
5296        _result
5297    }
5298
5299    fn send_raw(&self) -> Result<(), fidl::Error> {
5300        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
5301            fidl::encoding::Flexible::new(()),
5302            self.tx_id,
5303            0x11ac2555cf575b54,
5304            fidl::encoding::DynamicFlags::FLEXIBLE,
5305        )
5306    }
5307}
5308
/// Responder for the two-way `BufferCollection.GetNodeRef` method.
///
/// Must be consumed via `send` (or `drop_without_shutdown`); dropping it
/// without responding shuts the channel down (see the `Drop` impl).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionGetNodeRefResponder {
    // Wrapped in `ManuallyDrop` so `Drop` and `drop_without_shutdown` can each
    // release the handle exactly once.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request; passed back when sending the response.
    tx_id: u32,
}
5315
/// Sets the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionGetNodeRefResponder {
    fn drop(&mut self) {
        // Shut down first so a waiting client observes closure rather than
        // hanging on a response that will never arrive.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5326
impl fidl::endpoints::Responder for BufferCollectionGetNodeRefResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Returns the control handle for this responder's connection.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel; no response
    /// is sent for this transaction.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5341
5342impl BufferCollectionGetNodeRefResponder {
5343    /// Sends a response to the FIDL transaction.
5344    ///
5345    /// Sets the channel to shutdown if an error occurs.
5346    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5347        let _result = self.send_raw(payload);
5348        if _result.is_err() {
5349            self.control_handle.shutdown();
5350        }
5351        self.drop_without_shutdown();
5352        _result
5353    }
5354
5355    /// Similar to "send" but does not shutdown the channel if an error occurs.
5356    pub fn send_no_shutdown_on_err(
5357        self,
5358        mut payload: NodeGetNodeRefResponse,
5359    ) -> Result<(), fidl::Error> {
5360        let _result = self.send_raw(payload);
5361        self.drop_without_shutdown();
5362        _result
5363    }
5364
5365    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5366        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
5367            fidl::encoding::Flexible::new(&mut payload),
5368            self.tx_id,
5369            0x5b3d0e51614df053,
5370            fidl::encoding::DynamicFlags::FLEXIBLE,
5371        )
5372    }
5373}
5374
/// Responder for the two-way `BufferCollection.IsAlternateFor` method.
///
/// Must be consumed via `send` (or `drop_without_shutdown`); dropping it
/// without responding shuts the channel down (see the `Drop` impl).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionIsAlternateForResponder {
    // Wrapped in `ManuallyDrop` so `Drop` and `drop_without_shutdown` can each
    // release the handle exactly once.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request; passed back when sending the response.
    tx_id: u32,
}
5381
/// Sets the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionIsAlternateForResponder {
    fn drop(&mut self) {
        // Shut down first so a waiting client observes closure rather than
        // hanging on a response that will never arrive.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5392
impl fidl::endpoints::Responder for BufferCollectionIsAlternateForResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Returns the control handle for this responder's connection.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel; no response
    /// is sent for this transaction.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5407
impl BufferCollectionIsAlternateForResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        // Consume self without triggering the Drop-based shutdown.
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the reply message; shared by both `send` variants.
    fn send_raw(
        &self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            NodeIsAlternateForResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x3a58e00157e0825, // `IsAlternateFor` method ordinal (generated)
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
5449
/// Responder for a pending `GetBufferCollectionId` transaction on a
/// `BufferCollection` channel; dropping it without replying shuts the channel
/// down (see the `Drop` impl below).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionGetBufferCollectionIdResponder {
    // Wrapped in `ManuallyDrop` so `Drop`/`drop_without_shutdown` can control
    // exactly when (and whether) the handle's own drop logic runs.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5456
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5467
impl fidl::endpoints::Responder for BufferCollectionGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel; no response
    /// will ever be sent for this transaction.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5482
impl BufferCollectionGetBufferCollectionIdResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        // Consume self without triggering the Drop-based shutdown.
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut payload: &NodeGetBufferCollectionIdResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the reply message; shared by both `send` variants.
    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
        self.control_handle
            .inner
            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
                fidl::encoding::Flexible::new(payload),
                self.tx_id,
                0x77d19a494b78ba8c, // `GetBufferCollectionId` method ordinal (generated)
                fidl::encoding::DynamicFlags::FLEXIBLE,
            )
    }
}
5517
/// Responder for a pending `WaitForAllBuffersAllocated` transaction on a
/// `BufferCollection` channel; dropping it without replying shuts the channel
/// down (see the `Drop` impl below).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionWaitForAllBuffersAllocatedResponder {
    // Wrapped in `ManuallyDrop` so `Drop`/`drop_without_shutdown` can control
    // exactly when (and whether) the handle's own drop logic runs.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5524
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionWaitForAllBuffersAllocatedResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5535
impl fidl::endpoints::Responder for BufferCollectionWaitForAllBuffersAllocatedResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel; no response
    /// will ever be sent for this transaction.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5550
impl BufferCollectionWaitForAllBuffersAllocatedResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        // Consume self without triggering the Drop-based shutdown.
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the reply message; shared by both `send` variants.
    fn send_raw(
        &self,
        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            BufferCollectionWaitForAllBuffersAllocatedResponse,
            Error,
        >>(
            // Unlike the by-reference responders above, this payload is taken
            // by value and encoded through `as_mut()`; the error variant is
            // copied out of the `&mut` borrow.
            fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
            self.tx_id,
            0x62300344b61404e, // `WaitForAllBuffersAllocated` method ordinal (generated)
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
5592
/// Responder for a pending `CheckAllBuffersAllocated` transaction on a
/// `BufferCollection` channel; dropping it without replying shuts the channel
/// down (see the `Drop` impl below).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionCheckAllBuffersAllocatedResponder {
    // Wrapped in `ManuallyDrop` so `Drop`/`drop_without_shutdown` can control
    // exactly when (and whether) the handle's own drop logic runs.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5599
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionCheckAllBuffersAllocatedResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5610
impl fidl::endpoints::Responder for BufferCollectionCheckAllBuffersAllocatedResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel; no response
    /// will ever be sent for this transaction.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5625
impl BufferCollectionCheckAllBuffersAllocatedResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        // Consume self without triggering the Drop-based shutdown.
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the reply message; shared by both `send` variants.
    /// The success payload is empty (`EmptyStruct`).
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x35a5fe77ce939c10, // `CheckAllBuffersAllocated` method ordinal (generated)
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
5658
/// Zero-sized marker identifying the `BufferCollectionToken` protocol; its
/// `ProtocolMarker` impl ties together the proxy and request-stream types.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionTokenMarker;
5661
impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenMarker {
    type Proxy = BufferCollectionTokenProxy;
    type RequestStream = BufferCollectionTokenRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionTokenSynchronousProxy;

    // Name used in debug/error messages; "(anonymous)" presumably means the
    // protocol has no discoverable service name — TODO confirm against fidlgen.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionToken";
}
5670
/// Client-side interface for the `BufferCollectionToken` protocol: one item
/// per protocol method. One-way (fire-and-forget) methods return
/// `Result<(), fidl::Error>` directly; two-way methods return an associated
/// future type that resolves to the decoded response.
pub trait BufferCollectionTokenProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    type DuplicateSyncResponseFut: std::future::Future<
            Output = Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error>,
        > + Send;
    fn r#duplicate_sync(
        &self,
        payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> Self::DuplicateSyncResponseFut;
    fn r#duplicate(
        &self,
        payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_dispensable(&self) -> Result<(), fidl::Error>;
    fn r#create_buffer_collection_token_group(
        &self,
        payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client for the `BufferCollectionToken` protocol;
/// only available on Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionTokenSynchronousProxy {
    // Low-level synchronous FIDL client owning the underlying channel.
    client: fidl::client::sync::Client,
}
5725
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenSynchronousProxy {
    type Proxy = BufferCollectionTokenProxy;
    type Protocol = BufferCollectionTokenMarker;

    /// Wraps a raw channel in a synchronous proxy (delegates to `new`).
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    /// Consumes the proxy, returning the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
5743
5744#[cfg(target_os = "fuchsia")]
5745impl BufferCollectionTokenSynchronousProxy {
    /// Creates a synchronous proxy from the client end of a channel, tagging
    /// the underlying client with this protocol's debug name for diagnostics.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name =
            <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }
5751
    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }
5755
5756    /// Waits until an event arrives and returns it. It is safe for other
5757    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
        // Block until an event message arrives (or `deadline` passes), then
        // decode it into the strongly-typed event enum.
        BufferCollectionTokenEvent::decode(self.client.wait_for_event(deadline)?)
    }
5764
5765    /// Ensure that previous messages have been received server side. This is
5766    /// particularly useful after previous messages that created new tokens,
5767    /// because a token must be known to the sysmem server before sending the
5768    /// token to another participant.
5769    ///
5770    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
5771    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
5772    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
5773    /// to mitigate the possibility of a hostile/fake
5774    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
5775    /// Another way is to pass the token to
5776    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
5777    /// the token as part of exchanging it for a
5778    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
5779    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
5780    /// of stalling.
5781    ///
5782    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
5783    /// and then starting and completing a `Sync`, it's then safe to send the
5784    /// `BufferCollectionToken` client ends to other participants knowing the
5785    /// server will recognize the tokens when they're sent by the other
5786    /// participants to sysmem in a
5787    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
5788    /// efficient way to create tokens while avoiding unnecessary round trips.
5789    ///
5790    /// Other options include waiting for each
5791    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
5792    /// individually (using separate call to `Sync` after each), or calling
5793    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
5794    /// converted to a `BufferCollection` via
5795    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
5796    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
5797    /// the sync step and can create multiple tokens at once.
    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
        // Two-way call with an empty request and empty (flexible) response;
        // blocks until the reply arrives or `___deadline` passes.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
        >(
            (),
            0x11ac2555cf575b54, // `Sync` method ordinal (generated)
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionTokenMarker>("sync")?;
        Ok(_response)
    }
5811
5812    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
5813    ///
5814    /// Normally a participant will convert a `BufferCollectionToken` into a
5815    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
5816    /// `Release` via the token (and then close the channel immediately or
5817    /// shortly later in response to server closing the server end), which
5818    /// avoids causing buffer collection failure. Without a prior `Release`,
5819    /// closing the `BufferCollectionToken` client end will cause buffer
5820    /// collection failure.
5821    ///
5822    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
5823    ///
5824    /// By default the server handles unexpected closure of a
5825    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
5826    /// first) by failing the buffer collection. Partly this is to expedite
5827    /// closing VMO handles to reclaim memory when any participant fails. If a
5828    /// participant would like to cleanly close a `BufferCollection` without
5829    /// causing buffer collection failure, the participant can send `Release`
5830    /// before closing the `BufferCollection` client end. The `Release` can
5831    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
5832    /// buffer collection won't require constraints from this node in order to
5833    /// allocate. If after `SetConstraints`, the constraints are retained and
5834    /// aggregated, despite the lack of `BufferCollection` connection at the
5835    /// time of constraints aggregation.
5836    ///
5837    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
5838    ///
5839    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
5840    /// end (without `Release` first) will trigger failure of the buffer
5841    /// collection. To close a `BufferCollectionTokenGroup` channel without
5842    /// failing the buffer collection, ensure that AllChildrenPresent() has been
5843    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
5844    /// client end.
5845    ///
5846    /// If `Release` occurs before
5847    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
5848    /// buffer collection will fail (triggered by reception of `Release` without
5849    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
5850    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
5851    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
5852    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
5853    /// close requires `AllChildrenPresent` (if not already sent), then
5854    /// `Release`, then close client end.
5855    ///
5856    /// If `Release` occurs after `AllChildrenPresent`, the children and all
5857    /// their constraints remain intact (just as they would if the
5858    /// `BufferCollectionTokenGroup` channel had remained open), and the client
5859    /// end close doesn't trigger buffer collection failure.
5860    ///
5861    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
5862    ///
5863    /// For brevity, the per-channel-protocol paragraphs above ignore the
5864    /// separate failure domain created by
5865    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
5866    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
5867    /// unexpectedly closes (without `Release` first) and that client end is
5868    /// under a failure domain, instead of failing the whole buffer collection,
5869    /// the failure domain is failed, but the buffer collection itself is
5870    /// isolated from failure of the failure domain. Such failure domains can be
5871    /// nested, in which case only the inner-most failure domain in which the
5872    /// `Node` resides fails.
5873    pub fn r#release(&self) -> Result<(), fidl::Error> {
5874        self.client.send::<fidl::encoding::EmptyPayload>(
5875            (),
5876            0x6a5cae7d6d6e04c6,
5877            fidl::encoding::DynamicFlags::FLEXIBLE,
5878        )
5879    }
5880
5881    /// Set a name for VMOs in this buffer collection.
5882    ///
5883    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
5884    /// will be truncated to fit. The name of the vmo will be suffixed with the
5885    /// buffer index within the collection (if the suffix fits within
5886    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
5887    /// listed in the inspect data.
5888    ///
5889    /// The name only affects VMOs allocated after the name is set; this call
5890    /// does not rename existing VMOs. If multiple clients set different names
5891    /// then the larger priority value will win. Setting a new name with the
5892    /// same priority as a prior name doesn't change the name.
5893    ///
5894    /// All table fields are currently required.
5895    ///
5896    /// + request `priority` The name is only set if this is the first `SetName`
5897    ///   or if `priority` is greater than any previous `priority` value in
5898    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
5899    /// + request `name` The name for VMOs created under this buffer collection.
    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        // One-way call carrying the name/priority table as its payload.
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9, // `SetName` method ordinal (generated)
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5907
5908    /// Set information about the current client that can be used by sysmem to
5909    /// help diagnose leaking memory and allocation stalls waiting for a
5910    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
5911    ///
5912    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
5913    /// `Node`(s) derived from this `Node`, unless overriden by
5914    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
5915    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
5916    ///
5917    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
5918    /// `Allocator` is the most efficient way to ensure that all
5919    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
5920    /// set, and is also more efficient than separately sending the same debug
5921    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
5922    /// created [`fuchsia.sysmem2/Node`].
5923    ///
5924    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
5925    /// indicate which client is closing their channel first, leading to subtree
5926    /// failure (which can be normal if the purpose of the subtree is over, but
5927    /// if happening earlier than expected, the client-channel-specific name can
5928    /// help diagnose where the failure is first coming from, from sysmem's
5929    /// point of view).
5930    ///
5931    /// All table fields are currently required.
5932    ///
5933    /// + request `name` This can be an arbitrary string, but the current
5934    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
5935    /// + request `id` This can be an arbitrary id, but the current process ID
5936    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call carrying the debug name/id table as its payload.
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1, // `SetDebugClientInfo` method ordinal (generated)
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5947
5948    /// Sysmem logs a warning if sysmem hasn't seen
5949    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
5950    /// within 5 seconds after creation of a new collection.
5951    ///
5952    /// Clients can call this method to change when the log is printed. If
5953    /// multiple client set the deadline, it's unspecified which deadline will
5954    /// take effect.
5955    ///
5956    /// In most cases the default works well.
5957    ///
5958    /// All table fields are currently required.
5959    ///
5960    /// + request `deadline` The time at which sysmem will start trying to log
5961    ///   the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call carrying the deadline table as its payload.
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806, // `SetDebugTimeoutLogDeadline` method ordinal (generated)
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5972
5973    /// This enables verbose logging for the buffer collection.
5974    ///
5975    /// Verbose logging includes constraints set via
5976    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
5977    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
5978    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
5979    /// the tree of `Node`(s).
5980    ///
5981    /// Normally sysmem prints only a single line complaint when aggregation
5982    /// fails, with just the specific detailed reason that aggregation failed,
5983    /// with little surrounding context.  While this is often enough to diagnose
5984    /// a problem if only a small change was made and everything was working
5985    /// before the small change, it's often not particularly helpful for getting
5986    /// a new buffer collection to work for the first time.  Especially with
5987    /// more complex trees of nodes, involving things like
5988    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
5989    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
5990    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
5991    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
5992    /// looks like and why it's failing a logical allocation, or why a tree or
5993    /// subtree is failing sooner than expected.
5994    ///
5995    /// The intent of the extra logging is to be acceptable from a performance
5996    /// point of view, under the assumption that verbose logging is only enabled
5997    /// on a low number of buffer collections. If we're not tracking down a bug,
5998    /// we shouldn't send this message.
    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) call with an empty payload.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad, // `SetVerboseLogging` method ordinal (generated)
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
6006
6007    /// This gets a handle that can be used as a parameter to
6008    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
6009    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
6010    /// client obtained this handle from this `Node`.
6011    ///
6012    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
6013    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
6014    /// despite the two calls typically being on different channels.
6015    ///
6016    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
6017    ///
6018    /// All table fields are currently required.
6019    ///
6020    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
6021    ///   different `Node` channel, to prove that the client obtained the handle
6022    ///   from this `Node`.
    pub fn r#get_node_ref(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
        // Two-way call: blocks until the response arrives or `___deadline`
        // passes, then unwraps the flexible envelope into the response table.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
        >(
            (),
            0x5b3d0e51614df053, // `GetNodeRef` method ordinal (generated)
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
        Ok(_response)
    }
6039
6040    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
6041    /// rooted at a different child token of a common parent
6042    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
6043    /// passed-in `node_ref`.
6044    ///
6045    /// This call is for assisting with admission control de-duplication, and
6046    /// with debugging.
6047    ///
6048    /// The `node_ref` must be obtained using
6049    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
6050    ///
6051    /// The `node_ref` can be a duplicated handle; it's not necessary to call
6052    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
6053    ///
6054    /// If a calling token may not actually be a valid token at all due to a
6055    /// potentially hostile/untrusted provider of the token, call
6056    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6057    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
6058    /// never responds due to a calling token not being a real token (not really
6059    /// talking to sysmem).  Another option is to call
6060    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
6061    /// which also validates the token along with converting it to a
6062    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
6063    ///
6064    /// All table fields are currently required.
6065    ///
6066    /// - response `is_alternate`
6067    ///   - true: The first parent node in common between the calling node and
6068    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
6069    ///     that the calling `Node` and the `node_ref` `Node` will not have both
6070    ///     their constraints apply - rather sysmem will choose one or the other
6071    ///     of the constraints - never both.  This is because only one child of
6072    ///     a `BufferCollectionTokenGroup` is selected during logical
6073    ///     allocation, with only that one child's subtree contributing to
6074    ///     constraints aggregation.
6075    ///   - false: The first parent node in common between the calling `Node`
6076    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
6077    ///     Currently, this means the first parent node in common is a
6078    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
6079    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
6080    ///     `Node` may have both their constraints apply during constraints
6081    ///     aggregation of the logical allocation, if both `Node`(s) are
6082    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
6083    ///     this case, there is no `BufferCollectionTokenGroup` that will
6084    ///     directly prevent the two `Node`(s) from both being selected and
6085    ///     their constraints both aggregated, but even when false, one or both
6086    ///     `Node`(s) may still be eliminated from consideration if one or both
6087    ///     `Node`(s) has a direct or indirect parent
6088    ///     `BufferCollectionTokenGroup` which selects a child subtree other
6089    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
6090    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
6091    ///   associated with the same buffer collection as the calling `Node`.
6092    ///   Another reason for this error is if the `node_ref` is an
6093    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
6094    ///   a real `node_ref` obtained from `GetNodeRef`.
6095    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
6097    ///   the needed rights expected on a real `node_ref`.
6098    /// * No other failing status codes are returned by this call.  However,
6099    ///   sysmem may add additional codes in future, so the client should have
6100    ///   sensible default handling for any failing status code.
6101    pub fn r#is_alternate_for(
6102        &self,
6103        mut payload: NodeIsAlternateForRequest,
6104        ___deadline: zx::MonotonicInstant,
6105    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
6106        let _response = self.client.send_query::<
6107            NodeIsAlternateForRequest,
6108            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
6109        >(
6110            &mut payload,
6111            0x3a58e00157e0825,
6112            fidl::encoding::DynamicFlags::FLEXIBLE,
6113            ___deadline,
6114        )?
6115        .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
6116        Ok(_response.map(|x| x))
6117    }
6118
6119    /// Get the buffer collection ID. This ID is also available from
6120    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
6121    /// within the collection).
6122    ///
6123    /// This call is mainly useful in situations where we can't convey a
6124    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
6125    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
6126    /// handle, which can be joined back up with a `BufferCollection` client end
6127    /// that was created via a different path. Prefer to convey a
6128    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
6129    ///
6130    /// Trusting a `buffer_collection_id` value from a source other than sysmem
6131    /// is analogous to trusting a koid value from a source other than zircon.
6132    /// Both should be avoided unless really necessary, and both require
6133    /// caution. In some situations it may be reasonable to refer to a
6134    /// pre-established `BufferCollection` by `buffer_collection_id` via a
6135    /// protocol for efficiency reasons, but an incoming value purporting to be
6136    /// a `buffer_collection_id` is not sufficient alone to justify granting the
6137    /// sender of the `buffer_collection_id` any capability. The sender must
6138    /// first prove to a receiver that the sender has/had a VMO or has/had a
6139    /// `BufferCollectionToken` to the same collection by sending a handle that
6140    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
6141    /// `buffer_collection_id` value. The receiver should take care to avoid
6142    /// assuming that a sender had a `BufferCollectionToken` in cases where the
6143    /// sender has only proven that the sender had a VMO.
6144    ///
6145    /// - response `buffer_collection_id` This ID is unique per buffer
6146    ///   collection per boot. Each buffer is uniquely identified by the
6147    ///   `buffer_collection_id` and `buffer_index` together.
6148    pub fn r#get_buffer_collection_id(
6149        &self,
6150        ___deadline: zx::MonotonicInstant,
6151    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
6152        let _response = self.client.send_query::<
6153            fidl::encoding::EmptyPayload,
6154            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
6155        >(
6156            (),
6157            0x77d19a494b78ba8c,
6158            fidl::encoding::DynamicFlags::FLEXIBLE,
6159            ___deadline,
6160        )?
6161        .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
6162        Ok(_response)
6163    }
6164
6165    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
6166    /// created after this message to weak, which means that a client's `Node`
6167    /// client end (or a child created after this message) is not alone
6168    /// sufficient to keep allocated VMOs alive.
6169    ///
6170    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
6171    /// `close_weak_asap`.
6172    ///
6173    /// This message is only permitted before the `Node` becomes ready for
6174    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
6175    ///   * `BufferCollectionToken`: any time
6176    ///   * `BufferCollection`: before `SetConstraints`
6177    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
6178    ///
6179    /// Currently, no conversion from strong `Node` to weak `Node` after ready
6180    /// for allocation is provided, but a client can simulate that by creating
6181    /// an additional `Node` before allocation and setting that additional
6182    /// `Node` to weak, and then potentially at some point later sending
6183    /// `Release` and closing the client end of the client's strong `Node`, but
6184    /// keeping the client's weak `Node`.
6185    ///
6186    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
6187    /// collection failure (all `Node` client end(s) will see
6188    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
6189    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
6190    /// this situation until all `Node`(s) are ready for allocation. For initial
6191    /// allocation to succeed, at least one strong `Node` is required to exist
6192    /// at allocation time, but after that client receives VMO handles, that
6193    /// client can `BufferCollection.Release` and close the client end without
6194    /// causing this type of failure.
6195    ///
6196    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
6197    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
6198    /// separately as appropriate.
6199    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
6200        self.client.send::<fidl::encoding::EmptyPayload>(
6201            (),
6202            0x22dd3ea514eeffe1,
6203            fidl::encoding::DynamicFlags::FLEXIBLE,
6204        )
6205    }
6206
6207    /// This indicates to sysmem that the client is prepared to pay attention to
6208    /// `close_weak_asap`.
6209    ///
6210    /// If sent, this message must be before
6211    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
6212    ///
6213    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
6214    /// send this message before `WaitForAllBuffersAllocated`, or a parent
6215    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
6216    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
6217    /// trigger buffer collection failure.
6218    ///
6219    /// This message is necessary because weak sysmem VMOs have not always been
6220    /// a thing, so older clients are not aware of the need to pay attention to
6221    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
6222    /// sysmem weak VMO handles asap. By having this message and requiring
6223    /// participants to indicate their acceptance of this aspect of the overall
6224    /// protocol, we avoid situations where an older client is delivered a weak
6225    /// VMO without any way for sysmem to get that VMO to close quickly later
6226    /// (and on a per-buffer basis).
6227    ///
6228    /// A participant that doesn't handle `close_weak_asap` and also doesn't
6229    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
6230    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
6231    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
6232    /// same participant has a child/delegate which does retrieve VMOs, that
6233    /// child/delegate will need to send `SetWeakOk` before
6234    /// `WaitForAllBuffersAllocated`.
6235    ///
6236    /// + request `for_child_nodes_also` If present and true, this means direct
6237    ///   child nodes of this node created after this message plus all
6238    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
6239    ///   those nodes. Any child node of this node that was created before this
6240    ///   message is not included. This setting is "sticky" in the sense that a
6241    ///   subsequent `SetWeakOk` without this bool set to true does not reset
6242    ///   the server-side bool. If this creates a problem for a participant, a
6243    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
6244    ///   tokens instead, as appropriate. A participant should only set
6245    ///   `for_child_nodes_also` true if the participant can really promise to
6246    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
6247    ///   weak VMO handles held by participants holding the corresponding child
6248    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
6249    ///   which are using sysmem(1) can be weak, despite the clients of those
6250    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
6251    ///   direct way to find out about `close_weak_asap`. This only applies to
6252    ///   descendents of this `Node` which are using sysmem(1), not to this
6253    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
6254    ///   token, which will fail allocation unless an ancestor of this `Node`
6255    ///   specified `for_child_nodes_also` true.
6256    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
6257        self.client.send::<NodeSetWeakOkRequest>(
6258            &mut payload,
6259            0x38a44fc4d7724be9,
6260            fidl::encoding::DynamicFlags::FLEXIBLE,
6261        )
6262    }
6263
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
6266    /// reservation by a different `Node` via
6267    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
6268    ///
6269    /// The `Node` buffer counts may not be released until the entire tree of
6270    /// `Node`(s) is closed or failed, because
6271    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
6272    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
6273    /// `Node` buffer counts remain reserved until the orphaned node is later
6274    /// cleaned up.
6275    ///
6276    /// If the `Node` exceeds a fairly large number of attached eventpair server
6277    /// ends, a log message will indicate this and the `Node` (and the
6278    /// appropriate) sub-tree will fail.
6279    ///
6280    /// The `server_end` will remain open when
6281    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
6282    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
6283    /// [`fuchsia.sysmem2/BufferCollection`].
6284    ///
6285    /// This message can also be used with a
6286    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
6287    pub fn r#attach_node_tracking(
6288        &self,
6289        mut payload: NodeAttachNodeTrackingRequest,
6290    ) -> Result<(), fidl::Error> {
6291        self.client.send::<NodeAttachNodeTrackingRequest>(
6292            &mut payload,
6293            0x3f22f2a293d3cdac,
6294            fidl::encoding::DynamicFlags::FLEXIBLE,
6295        )
6296    }
6297
6298    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
6299    /// one, referring to the same buffer collection.
6300    ///
6301    /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
6303    ///
6304    /// This method can be used to add more participants, by transferring the
6305    /// newly created tokens to additional participants.
6306    ///
6307    /// A new token will be returned for each entry in the
6308    /// `rights_attenuation_masks` array.
6309    ///
6310    /// If the called token may not actually be a valid token due to a
6311    /// potentially hostile/untrusted provider of the token, consider using
6312    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6313    /// instead of potentially getting stuck indefinitely if
6314    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
6315    /// due to the calling token not being a real token.
6316    ///
6317    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
6318    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
6319    /// method, because the sync step is included in this call, at the cost of a
6320    /// round trip during this call.
6321    ///
6322    /// All tokens must be turned in to sysmem via
6323    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6324    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6325    /// successfully allocate buffers (or to logically allocate buffers in the
6326    /// case of subtrees involving
6327    /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
6328    ///
6329    /// All table fields are currently required.
6330    ///
6331    /// + request `rights_attenuation_mask` In each entry of
6332    ///   `rights_attenuation_masks`, rights bits that are zero will be absent
6333    ///   in the buffer VMO rights obtainable via the corresponding returned
6334    ///   token. This allows an initiator or intermediary participant to
6335    ///   attenuate the rights available to a participant. This does not allow a
6336    ///   participant to gain rights that the participant doesn't already have.
6337    ///   The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
6338    ///   attenuation should be applied.
6339    /// - response `tokens` The client ends of each newly created token.
6340    pub fn r#duplicate_sync(
6341        &self,
6342        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
6343        ___deadline: zx::MonotonicInstant,
6344    ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
6345        let _response = self.client.send_query::<
6346            BufferCollectionTokenDuplicateSyncRequest,
6347            fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
6348        >(
6349            payload,
6350            0x1c1af9919d1ca45c,
6351            fidl::encoding::DynamicFlags::FLEXIBLE,
6352            ___deadline,
6353        )?
6354        .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
6355        Ok(_response)
6356    }
6357
6358    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
6359    /// one, referring to the same buffer collection.
6360    ///
6361    /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
6363    ///
6364    /// This method can be used to add a participant, by transferring the newly
6365    /// created token to another participant.
6366    ///
6367    /// This one-way message can be used instead of the two-way
6368    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
6370    /// sysmem to respond to
6371    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
6372    /// client code isn't structured to make it easy to duplicate all the needed
6373    /// tokens at once.
6374    ///
6375    /// After sending one or more `Duplicate` messages, and before sending the
6376    /// newly created child tokens to other participants (or to other
6377    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
6378    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
6379    /// `Sync` call can be made on the token, or on the `BufferCollection`
6380    /// obtained by passing this token to `BindSharedCollection`.  Either will
6381    /// ensure that the server knows about the tokens created via `Duplicate`
6382    /// before the other participant sends the token to the server via separate
6383    /// `Allocator` channel.
6384    ///
6385    /// All tokens must be turned in via
6386    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6387    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6388    /// successfully allocate buffers.
6389    ///
6390    /// All table fields are currently required.
6391    ///
6392    /// + request `rights_attenuation_mask` The rights bits that are zero in
6393    ///   this mask will be absent in the buffer VMO rights obtainable via the
6394    ///   client end of `token_request`. This allows an initiator or
6395    ///   intermediary participant to attenuate the rights available to a
6396    ///   delegate participant. This does not allow a participant to gain rights
6397    ///   that the participant doesn't already have. The value
6398    ///   `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
6399    ///   should be applied.
6400    ///   + These values for rights_attenuation_mask result in no attenuation:
6401    ///     + `ZX_RIGHT_SAME_RIGHTS` (preferred)
6402    ///     + 0xFFFFFFFF (this is reasonable when an attenuation mask is
6403    ///       computed)
6404    ///     + 0 (deprecated - do not use 0 - an ERROR will go to the log)
6405    /// + request `token_request` is the server end of a `BufferCollectionToken`
6406    ///   channel. The client end of this channel acts as another participant in
6407    ///   the shared buffer collection.
6408    pub fn r#duplicate(
6409        &self,
6410        mut payload: BufferCollectionTokenDuplicateRequest,
6411    ) -> Result<(), fidl::Error> {
6412        self.client.send::<BufferCollectionTokenDuplicateRequest>(
6413            &mut payload,
6414            0x73e78f92ee7fb887,
6415            fidl::encoding::DynamicFlags::FLEXIBLE,
6416        )
6417    }
6418
6419    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
6420    ///
6421    /// When the `BufferCollectionToken` is converted to a
6422    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
6423    /// the `BufferCollection` also.
6424    ///
6425    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
6426    /// client end without having sent
6427    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
6429    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
6430    /// to the root `Node`, which fails the whole buffer collection. In
6431    /// contrast, a dispensable `Node` can fail after buffers are allocated
6432    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
6434    ///
6435    /// The dispensable `Node` participates in constraints aggregation along
6436    /// with its parent before buffer allocation. If the dispensable `Node`
6437    /// fails before buffers are allocated, the failure propagates to the
6438    /// dispensable `Node`'s parent.
6439    ///
6440    /// After buffers are allocated, failure of the dispensable `Node` (or any
6441    /// child of the dispensable `Node`) does not propagate to the dispensable
6442    /// `Node`'s parent. Failure does propagate from a normal child of a
6443    /// dispensable `Node` to the dispensable `Node`.  Failure of a child is
6444    /// blocked from reaching its parent if the child is attached using
6445    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
6446    /// dispensable and the failure occurred after allocation.
6447    ///
6448    /// A dispensable `Node` can be used in cases where a participant needs to
6449    /// provide constraints, but after buffers are allocated, the participant
6450    /// can fail without causing buffer collection failure from the parent
6451    /// `Node`'s point of view.
6452    ///
6453    /// In contrast, `BufferCollection.AttachToken` can be used to create a
6454    /// `BufferCollectionToken` which does not participate in constraints
6455    /// aggregation with its parent `Node`, and whose failure at any time does
6456    /// not propagate to its parent `Node`, and whose potential delay providing
6457    /// constraints does not prevent the parent `Node` from completing its
6458    /// buffer allocation.
6459    ///
6460    /// An initiator (creator of the root `Node` using
6461    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
6462    /// scenarios choose to initially use a dispensable `Node` for a first
6463    /// instance of a participant, and then later if the first instance of that
6464    /// participant fails, a new second instance of that participant my be given
6465    /// a `BufferCollectionToken` created with `AttachToken`.
6466    ///
6467    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
6468    /// shortly before sending the dispensable `BufferCollectionToken` to a
6469    /// delegate participant. Because `SetDispensable` prevents propagation of
6470    /// child `Node` failure to parent `Node`(s), if the client was relying on
6471    /// noticing child failure via failure of the parent `Node` retained by the
6472    /// client, the client may instead need to notice failure via other means.
6473    /// If other means aren't available/convenient, the client can instead
6474    /// retain the dispensable `Node` and create a child `Node` under that to
6475    /// send to the delegate participant, retaining this `Node` in order to
6476    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
6477    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
6478    /// (e.g. starting a new instance of the delegate participant and handing it
6479    /// a `BufferCollectionToken` created using
6480    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
6481    /// and clean up in a client-specific way).
6482    ///
6483    /// While it is possible (and potentially useful) to `SetDispensable` on a
6484    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
6485    /// to later replace a failed dispensable `Node` that was a direct child of
6486    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
6487    /// (since there's no `AttachToken` on a group). Instead, to enable
6488    /// `AttachToken` replacement in this case, create an additional
6489    /// non-dispensable token that's a direct child of the group and make the
6490    /// existing dispensable token a child of the additional token.  This way,
6491    /// the additional token that is a direct child of the group has
6492    /// `BufferCollection.AttachToken` which can be used to replace the failed
6493    /// dispensable token.
6494    ///
6495    /// `SetDispensable` on an already-dispensable token is idempotent.
6496    pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
6497        self.client.send::<fidl::encoding::EmptyPayload>(
6498            (),
6499            0x228acf979254df8b,
6500            fidl::encoding::DynamicFlags::FLEXIBLE,
6501        )
6502    }
6503
6504    /// Create a logical OR among a set of tokens, called a
6505    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
6506    ///
6507    /// Most sysmem clients and many participants don't need to care about this
6508    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
6509    /// a participant wants to attempt to include one set of delegate
6510    /// participants, but if constraints don't combine successfully that way,
6511    /// fall back to a different (possibly overlapping) set of delegate
6512    /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
6514    /// across all involved delegate participants). In such cases, a
6515    /// `BufferCollectionTokenGroup` is useful.
6516    ///
6517    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
6518    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s).  The child tokens
6519    /// which are not selected during aggregation will fail (close), which a
6520    /// potential participant should notice when their `BufferCollection`
6521    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
6522    /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
6524    /// allocate a logical buffer collection or later async failure of a buffer
6525    /// collection).
6526    ///
6527    /// See comments on protocol `BufferCollectionTokenGroup`.
6528    ///
6529    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
6530    /// applied to the whole group can be achieved with a
6531    /// `BufferCollectionToken` for this purpose as a direct parent of the
6532    /// `BufferCollectionTokenGroup`.
6533    ///
6534    /// All table fields are currently required.
6535    ///
6536    /// + request `group_request` The server end of a
6537    ///   `BufferCollectionTokenGroup` channel to be served by sysmem.
6538    pub fn r#create_buffer_collection_token_group(
6539        &self,
6540        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
6541    ) -> Result<(), fidl::Error> {
6542        self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
6543            &mut payload,
6544            0x30f8d48e77bd36f2,
6545            fidl::encoding::DynamicFlags::FLEXIBLE,
6546        )
6547    }
6548}
6549
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionTokenSynchronousProxy> for zx::NullableHandle {
    /// Consumes the proxy and yields the handle backing its channel.
    fn from(value: BufferCollectionTokenSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
6556
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionTokenSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionTokenSynchronousProxy::new(value)
    }
}
6563
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionTokenSynchronousProxy {
    type Protocol = BufferCollectionTokenMarker;

    /// Builds a synchronous proxy from a typed client endpoint.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
6572
/// Asynchronous client proxy for the fuchsia.sysmem2/BufferCollectionToken
/// protocol.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenProxy {
    // Untyped FIDL client owning the underlying async channel.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
6577
6578impl fidl::endpoints::Proxy for BufferCollectionTokenProxy {
6579    type Protocol = BufferCollectionTokenMarker;
6580
6581    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
6582        Self::new(inner)
6583    }
6584
6585    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
6586        self.client.into_channel().map_err(|client| Self { client })
6587    }
6588
6589    fn as_channel(&self) -> &::fidl::AsyncChannel {
6590        self.client.as_channel()
6591    }
6592}
6593
6594impl BufferCollectionTokenProxy {
6595    /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionToken.
6596    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
6597        let protocol_name =
6598            <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
6599        Self { client: fidl::client::Client::new(channel, protocol_name) }
6600    }
6601
6602    /// Get a Stream of events from the remote end of the protocol.
6603    ///
6604    /// # Panics
6605    ///
6606    /// Panics if the event stream was already taken.
6607    pub fn take_event_stream(&self) -> BufferCollectionTokenEventStream {
6608        BufferCollectionTokenEventStream { event_receiver: self.client.take_event_receiver() }
6609    }
6610
6611    /// Ensure that previous messages have been received server side. This is
6612    /// particularly useful after previous messages that created new tokens,
6613    /// because a token must be known to the sysmem server before sending the
6614    /// token to another participant.
6615    ///
6616    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
6617    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
6618    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
6619    /// to mitigate the possibility of a hostile/fake
6620    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
6621    /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
6623    /// the token as part of exchanging it for a
6624    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
6625    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
6626    /// of stalling.
6627    ///
6628    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
6629    /// and then starting and completing a `Sync`, it's then safe to send the
6630    /// `BufferCollectionToken` client ends to other participants knowing the
6631    /// server will recognize the tokens when they're sent by the other
6632    /// participants to sysmem in a
6633    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
6634    /// efficient way to create tokens while avoiding unnecessary round trips.
6635    ///
6636    /// Other options include waiting for each
6637    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
6638    /// individually (using separate call to `Sync` after each), or calling
6639    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
6640    /// converted to a `BufferCollection` via
6641    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
6642    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
6643    /// the sync step and can create multiple tokens at once.
6644    pub fn r#sync(
6645        &self,
6646    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
6647        BufferCollectionTokenProxyInterface::r#sync(self)
6648    }
6649
    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
    ///
    /// Normally a participant will convert a `BufferCollectionToken` into a
    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
    /// `Release` via the token (and then close the channel immediately or
    /// shortly later in response to server closing the server end), which
    /// avoids causing buffer collection failure. Without a prior `Release`,
    /// closing the `BufferCollectionToken` client end will cause buffer
    /// collection failure.
    ///
    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
    ///
    /// By default the server handles unexpected closure of a
    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
    /// first) by failing the buffer collection. Partly this is to expedite
    /// closing VMO handles to reclaim memory when any participant fails. If a
    /// participant would like to cleanly close a `BufferCollection` without
    /// causing buffer collection failure, the participant can send `Release`
    /// before closing the `BufferCollection` client end. The `Release` can
    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
    /// buffer collection won't require constraints from this node in order to
    /// allocate. If after `SetConstraints`, the constraints are retained and
    /// aggregated, despite the lack of `BufferCollection` connection at the
    /// time of constraints aggregation.
    ///
    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
    ///
    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
    /// end (without `Release` first) will trigger failure of the buffer
    /// collection. To close a `BufferCollectionTokenGroup` channel without
    /// failing the buffer collection, ensure that AllChildrenPresent() has been
    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
    /// client end.
    ///
    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
    /// buffer collection will fail (triggered by reception of `Release` without
    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
    /// close requires `AllChildrenPresent` (if not already sent), then
    /// `Release`, then close client end.
    ///
    /// If `Release` occurs after `AllChildrenPresent`, the children and all
    /// their constraints remain intact (just as they would if the
    /// `BufferCollectionTokenGroup` channel had remained open), and the client
    /// end close doesn't trigger buffer collection failure.
    ///
    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
    ///
    /// For brevity, the per-channel-protocol paragraphs above ignore the
    /// separate failure domain created by
    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
    /// unexpectedly closes (without `Release` first) and that client end is
    /// under a failure domain, instead of failing the whole buffer collection,
    /// the failure domain is failed, but the buffer collection itself is
    /// isolated from failure of the failure domain. Such failure domains can be
    /// nested, in which case only the inner-most failure domain in which the
    /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way message; delegated to the proxy-interface trait impl.
        BufferCollectionTokenProxyInterface::r#release(self)
    }
6714
6715    /// Set a name for VMOs in this buffer collection.
6716    ///
6717    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
6718    /// will be truncated to fit. The name of the vmo will be suffixed with the
6719    /// buffer index within the collection (if the suffix fits within
6720    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
6721    /// listed in the inspect data.
6722    ///
6723    /// The name only affects VMOs allocated after the name is set; this call
6724    /// does not rename existing VMOs. If multiple clients set different names
6725    /// then the larger priority value will win. Setting a new name with the
6726    /// same priority as a prior name doesn't change the name.
6727    ///
6728    /// All table fields are currently required.
6729    ///
6730    /// + request `priority` The name is only set if this is the first `SetName`
6731    ///   or if `priority` is greater than any previous `priority` value in
6732    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
6733    /// + request `name` The name for VMOs created under this buffer collection.
6734    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
6735        BufferCollectionTokenProxyInterface::r#set_name(self, payload)
6736    }
6737
    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
    /// indicate which client is closing their channel first, leading to subtree
    /// failure (which can be normal if the purpose of the subtree is over, but
    /// if happening earlier than expected, the client-channel-specific name can
    /// help diagnose where the failure is first coming from, from sysmem's
    /// point of view).
    ///
    /// All table fields are currently required.
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; delegated to the proxy-interface trait impl.
        BufferCollectionTokenProxyInterface::r#set_debug_client_info(self, payload)
    }
6773
    /// Sysmem logs a warning if sysmem hasn't seen
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
    /// within 5 seconds after creation of a new collection.
    ///
    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
    /// take effect.
    ///
    /// In most cases the default works well.
    ///
    /// All table fields are currently required.
    ///
    /// + request `deadline` The time at which sysmem will start trying to log
    ///   the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; delegated to the proxy-interface trait impl.
        BufferCollectionTokenProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
    }
6794
6795    /// This enables verbose logging for the buffer collection.
6796    ///
6797    /// Verbose logging includes constraints set via
6798    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
6799    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
6800    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
6801    /// the tree of `Node`(s).
6802    ///
6803    /// Normally sysmem prints only a single line complaint when aggregation
6804    /// fails, with just the specific detailed reason that aggregation failed,
6805    /// with little surrounding context.  While this is often enough to diagnose
6806    /// a problem if only a small change was made and everything was working
6807    /// before the small change, it's often not particularly helpful for getting
6808    /// a new buffer collection to work for the first time.  Especially with
6809    /// more complex trees of nodes, involving things like
6810    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
6811    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
6812    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
6813    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
6814    /// looks like and why it's failing a logical allocation, or why a tree or
6815    /// subtree is failing sooner than expected.
6816    ///
6817    /// The intent of the extra logging is to be acceptable from a performance
6818    /// point of view, under the assumption that verbose logging is only enabled
6819    /// on a low number of buffer collections. If we're not tracking down a bug,
6820    /// we shouldn't send this message.
6821    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
6822        BufferCollectionTokenProxyInterface::r#set_verbose_logging(self)
6823    }
6824
6825    /// This gets a handle that can be used as a parameter to
6826    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
6827    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
6828    /// client obtained this handle from this `Node`.
6829    ///
6830    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
6831    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
6832    /// despite the two calls typically being on different channels.
6833    ///
6834    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
6835    ///
6836    /// All table fields are currently required.
6837    ///
6838    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
6839    ///   different `Node` channel, to prove that the client obtained the handle
6840    ///   from this `Node`.
6841    pub fn r#get_node_ref(
6842        &self,
6843    ) -> fidl::client::QueryResponseFut<
6844        NodeGetNodeRefResponse,
6845        fidl::encoding::DefaultFuchsiaResourceDialect,
6846    > {
6847        BufferCollectionTokenProxyInterface::r#get_node_ref(self)
6848    }
6849
    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
    /// rooted at a different child token of a common parent
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
    /// passed-in `node_ref`.
    ///
    /// This call is for assisting with admission control de-duplication, and
    /// with debugging.
    ///
    /// The `node_ref` must be obtained using
    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
    ///
    /// The `node_ref` can be a duplicated handle; it's not necessary to call
    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// If a calling token may not actually be a valid token at all due to a
    /// potentially hostile/untrusted provider of the token, call
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
    /// never responds due to a calling token not being a real token (not really
    /// talking to sysmem).  Another option is to call
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
    /// which also validates the token along with converting it to a
    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
    ///
    /// All table fields are currently required.
    ///
    /// - response `is_alternate`
    ///   - true: The first parent node in common between the calling node and
    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
    ///     that the calling `Node` and the `node_ref` `Node` will not have both
    ///     their constraints apply - rather sysmem will choose one or the other
    ///     of the constraints - never both.  This is because only one child of
    ///     a `BufferCollectionTokenGroup` is selected during logical
    ///     allocation, with only that one child's subtree contributing to
    ///     constraints aggregation.
    ///   - false: The first parent node in common between the calling `Node`
    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
    ///     Currently, this means the first parent node in common is a
    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
    ///     `Node` may have both their constraints apply during constraints
    ///     aggregation of the logical allocation, if both `Node`(s) are
    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
    ///     this case, there is no `BufferCollectionTokenGroup` that will
    ///     directly prevent the two `Node`(s) from both being selected and
    ///     their constraints both aggregated, but even when false, one or both
    ///     `Node`(s) may still be eliminated from consideration if one or both
    ///     `Node`(s) has a direct or indirect parent
    ///     `BufferCollectionTokenGroup` which selects a child subtree other
    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
    ///   associated with the same buffer collection as the calling `Node`.
    ///   Another reason for this error is if the `node_ref` is an
    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
    ///   a real `node_ref` obtained from `GetNodeRef`.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
    ///   the needed rights expected on a real `node_ref`.
    /// * No other failing status codes are returned by this call.  However,
    ///   sysmem may add additional codes in future, so the client should have
    ///   sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call; delegated to the proxy-interface trait impl.
        BufferCollectionTokenProxyInterface::r#is_alternate_for(self, payload)
    }
6920
6921    /// Get the buffer collection ID. This ID is also available from
6922    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
6923    /// within the collection).
6924    ///
6925    /// This call is mainly useful in situations where we can't convey a
6926    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
6927    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
6928    /// handle, which can be joined back up with a `BufferCollection` client end
6929    /// that was created via a different path. Prefer to convey a
6930    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
6931    ///
6932    /// Trusting a `buffer_collection_id` value from a source other than sysmem
6933    /// is analogous to trusting a koid value from a source other than zircon.
6934    /// Both should be avoided unless really necessary, and both require
6935    /// caution. In some situations it may be reasonable to refer to a
6936    /// pre-established `BufferCollection` by `buffer_collection_id` via a
6937    /// protocol for efficiency reasons, but an incoming value purporting to be
6938    /// a `buffer_collection_id` is not sufficient alone to justify granting the
6939    /// sender of the `buffer_collection_id` any capability. The sender must
6940    /// first prove to a receiver that the sender has/had a VMO or has/had a
6941    /// `BufferCollectionToken` to the same collection by sending a handle that
6942    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
6943    /// `buffer_collection_id` value. The receiver should take care to avoid
6944    /// assuming that a sender had a `BufferCollectionToken` in cases where the
6945    /// sender has only proven that the sender had a VMO.
6946    ///
6947    /// - response `buffer_collection_id` This ID is unique per buffer
6948    ///   collection per boot. Each buffer is uniquely identified by the
6949    ///   `buffer_collection_id` and `buffer_index` together.
6950    pub fn r#get_buffer_collection_id(
6951        &self,
6952    ) -> fidl::client::QueryResponseFut<
6953        NodeGetBufferCollectionIdResponse,
6954        fidl::encoding::DefaultFuchsiaResourceDialect,
6955    > {
6956        BufferCollectionTokenProxyInterface::r#get_buffer_collection_id(self)
6957    }
6958
6959    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
6960    /// created after this message to weak, which means that a client's `Node`
6961    /// client end (or a child created after this message) is not alone
6962    /// sufficient to keep allocated VMOs alive.
6963    ///
6964    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
6965    /// `close_weak_asap`.
6966    ///
6967    /// This message is only permitted before the `Node` becomes ready for
6968    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
6969    ///   * `BufferCollectionToken`: any time
6970    ///   * `BufferCollection`: before `SetConstraints`
6971    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
6972    ///
6973    /// Currently, no conversion from strong `Node` to weak `Node` after ready
6974    /// for allocation is provided, but a client can simulate that by creating
6975    /// an additional `Node` before allocation and setting that additional
6976    /// `Node` to weak, and then potentially at some point later sending
6977    /// `Release` and closing the client end of the client's strong `Node`, but
6978    /// keeping the client's weak `Node`.
6979    ///
6980    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
6981    /// collection failure (all `Node` client end(s) will see
6982    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
6983    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
6984    /// this situation until all `Node`(s) are ready for allocation. For initial
6985    /// allocation to succeed, at least one strong `Node` is required to exist
6986    /// at allocation time, but after that client receives VMO handles, that
6987    /// client can `BufferCollection.Release` and close the client end without
6988    /// causing this type of failure.
6989    ///
6990    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
6991    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
6992    /// separately as appropriate.
6993    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
6994        BufferCollectionTokenProxyInterface::r#set_weak(self)
6995    }
6996
    /// This indicates to sysmem that the client is prepared to pay attention to
    /// `close_weak_asap`.
    ///
    /// If sent, this message must be before
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
    /// send this message before `WaitForAllBuffersAllocated`, or a parent
    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
    /// trigger buffer collection failure.
    ///
    /// This message is necessary because weak sysmem VMOs have not always been
    /// a thing, so older clients are not aware of the need to pay attention to
    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
    /// sysmem weak VMO handles asap. By having this message and requiring
    /// participants to indicate their acceptance of this aspect of the overall
    /// protocol, we avoid situations where an older client is delivered a weak
    /// VMO without any way for sysmem to get that VMO to close quickly later
    /// (and on a per-buffer basis).
    ///
    /// A participant that doesn't handle `close_weak_asap` and also doesn't
    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
    /// same participant has a child/delegate which does retrieve VMOs, that
    /// child/delegate will need to send `SetWeakOk` before
    /// `WaitForAllBuffersAllocated`.
    ///
    /// + request `for_child_nodes_also` If present and true, this means direct
    ///   child nodes of this node created after this message plus all
    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
    ///   those nodes. Any child node of this node that was created before this
    ///   message is not included. This setting is "sticky" in the sense that a
    ///   subsequent `SetWeakOk` without this bool set to true does not reset
    ///   the server-side bool. If this creates a problem for a participant, a
    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
    ///   tokens instead, as appropriate. A participant should only set
    ///   `for_child_nodes_also` true if the participant can really promise to
    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
    ///   weak VMO handles held by participants holding the corresponding child
    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
    ///   which are using sysmem(1) can be weak, despite the clients of those
    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
    ///   direct way to find out about `close_weak_asap`. This only applies to
    ///   descendants of this `Node` which are using sysmem(1), not to this
    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
    ///   token, which will fail allocation unless an ancestor of this `Node`
    ///   specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // One-way message; delegated to the proxy-interface trait impl.
        BufferCollectionTokenProxyInterface::r#set_weak_ok(self, payload)
    }
7049
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
    /// reservation by a different `Node` via
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
    ///
    /// The `Node` buffer counts may not be released until the entire tree of
    /// `Node`(s) is closed or failed, because
    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
    /// `Node` buffer counts remain reserved until the orphaned node is later
    /// cleaned up.
    ///
    /// If the `Node` exceeds a fairly large number of attached eventpair server
    /// ends, a log message will indicate this and the `Node` (and the
    /// appropriate) sub-tree will fail.
    ///
    /// The `server_end` will remain open when
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// This message can also be used with a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; delegated to the proxy-interface trait impl.
        BufferCollectionTokenProxyInterface::r#attach_node_tracking(self, payload)
    }
7079
    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
    /// one, referring to the same buffer collection.
    ///
    /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
    ///
    /// This method can be used to add more participants, by transferring the
    /// newly created tokens to additional participants.
    ///
    /// A new token will be returned for each entry in the
    /// `rights_attenuation_masks` array.
    ///
    /// If the called token may not actually be a valid token due to a
    /// potentially hostile/untrusted provider of the token, consider using
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
    /// due to the calling token not being a real token.
    ///
    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
    /// method, because the sync step is included in this call, at the cost of a
    /// round trip during this call.
    ///
    /// All tokens must be turned in to sysmem via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
    /// successfully allocate buffers (or to logically allocate buffers in the
    /// case of subtrees involving
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]).
    ///
    /// All table fields are currently required.
    ///
    /// + request `rights_attenuation_mask` In each entry of
    ///   `rights_attenuation_masks`, rights bits that are zero will be absent
    ///   in the buffer VMO rights obtainable via the corresponding returned
    ///   token. This allows an initiator or intermediary participant to
    ///   attenuate the rights available to a participant. This does not allow a
    ///   participant to gain rights that the participant doesn't already have.
    ///   The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
    ///   attenuation should be applied.
    /// - response `tokens` The client ends of each newly created token.
    pub fn r#duplicate_sync(
        &self,
        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> fidl::client::QueryResponseFut<
        BufferCollectionTokenDuplicateSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call (includes the sync step); delegated to the
        // proxy-interface trait impl.
        BufferCollectionTokenProxyInterface::r#duplicate_sync(self, payload)
    }
7131
    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
    /// one, referring to the same buffer collection.
    ///
    /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
    ///
    /// This method can be used to add a participant, by transferring the newly
    /// created token to another participant.
    ///
    /// This one-way message can be used instead of the two-way
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
    /// sysmem to respond to
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
    /// client code isn't structured to make it easy to duplicate all the needed
    /// tokens at once.
    ///
    /// After sending one or more `Duplicate` messages, and before sending the
    /// newly created child tokens to other participants (or to other
    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
    /// `Sync` call can be made on the token, or on the `BufferCollection`
    /// obtained by passing this token to `BindSharedCollection`.  Either will
    /// ensure that the server knows about the tokens created via `Duplicate`
    /// before the other participant sends the token to the server via separate
    /// `Allocator` channel.
    ///
    /// All tokens must be turned in via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
    /// successfully allocate buffers.
    ///
    /// All table fields are currently required.
    ///
    /// + request `rights_attenuation_mask` The rights bits that are zero in
    ///   this mask will be absent in the buffer VMO rights obtainable via the
    ///   client end of `token_request`. This allows an initiator or
    ///   intermediary participant to attenuate the rights available to a
    ///   delegate participant. This does not allow a participant to gain rights
    ///   that the participant doesn't already have. The value
    ///   `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
    ///   should be applied.
    ///   + These values for rights_attenuation_mask result in no attenuation:
    ///     + `ZX_RIGHT_SAME_RIGHTS` (preferred)
    ///     + 0xFFFFFFFF (this is reasonable when an attenuation mask is
    ///       computed)
    ///     + 0 (deprecated - do not use 0 - an ERROR will go to the log)
    /// + request `token_request` is the server end of a `BufferCollectionToken`
    ///   channel. The client end of this channel acts as another participant in
    ///   the shared buffer collection.
    pub fn r#duplicate(
        &self,
        mut payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; delegated to the proxy-interface trait impl.
        BufferCollectionTokenProxyInterface::r#duplicate(self, payload)
    }
7188
7189    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
7190    ///
7191    /// When the `BufferCollectionToken` is converted to a
7192    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
7193    /// the `BufferCollection` also.
7194    ///
7195    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
7196    /// client end without having sent
7197    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
7199    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
7200    /// to the root `Node`, which fails the whole buffer collection. In
7201    /// contrast, a dispensable `Node` can fail after buffers are allocated
7202    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
7203    /// hierarchy.
7204    ///
7205    /// The dispensable `Node` participates in constraints aggregation along
7206    /// with its parent before buffer allocation. If the dispensable `Node`
7207    /// fails before buffers are allocated, the failure propagates to the
7208    /// dispensable `Node`'s parent.
7209    ///
7210    /// After buffers are allocated, failure of the dispensable `Node` (or any
7211    /// child of the dispensable `Node`) does not propagate to the dispensable
7212    /// `Node`'s parent. Failure does propagate from a normal child of a
7213    /// dispensable `Node` to the dispensable `Node`.  Failure of a child is
7214    /// blocked from reaching its parent if the child is attached using
7215    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
7216    /// dispensable and the failure occurred after allocation.
7217    ///
7218    /// A dispensable `Node` can be used in cases where a participant needs to
7219    /// provide constraints, but after buffers are allocated, the participant
7220    /// can fail without causing buffer collection failure from the parent
7221    /// `Node`'s point of view.
7222    ///
7223    /// In contrast, `BufferCollection.AttachToken` can be used to create a
7224    /// `BufferCollectionToken` which does not participate in constraints
7225    /// aggregation with its parent `Node`, and whose failure at any time does
7226    /// not propagate to its parent `Node`, and whose potential delay providing
7227    /// constraints does not prevent the parent `Node` from completing its
7228    /// buffer allocation.
7229    ///
7230    /// An initiator (creator of the root `Node` using
7231    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
7232    /// scenarios choose to initially use a dispensable `Node` for a first
7233    /// instance of a participant, and then later if the first instance of that
7234    /// participant fails, a new second instance of that participant may be given
7235    /// a `BufferCollectionToken` created with `AttachToken`.
7236    ///
7237    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
7238    /// shortly before sending the dispensable `BufferCollectionToken` to a
7239    /// delegate participant. Because `SetDispensable` prevents propagation of
7240    /// child `Node` failure to parent `Node`(s), if the client was relying on
7241    /// noticing child failure via failure of the parent `Node` retained by the
7242    /// client, the client may instead need to notice failure via other means.
7243    /// If other means aren't available/convenient, the client can instead
7244    /// retain the dispensable `Node` and create a child `Node` under that to
7245    /// send to the delegate participant, retaining this `Node` in order to
7246    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
7247    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
7248    /// (e.g. starting a new instance of the delegate participant and handing it
7249    /// a `BufferCollectionToken` created using
7250    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
7251    /// and clean up in a client-specific way).
7252    ///
7253    /// While it is possible (and potentially useful) to `SetDispensable` on a
7254    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
7255    /// to later replace a failed dispensable `Node` that was a direct child of
7256    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
7257    /// (since there's no `AttachToken` on a group). Instead, to enable
7258    /// `AttachToken` replacement in this case, create an additional
7259    /// non-dispensable token that's a direct child of the group and make the
7260    /// existing dispensable token a child of the additional token.  This way,
7261    /// the additional token that is a direct child of the group has
7262    /// `BufferCollection.AttachToken` which can be used to replace the failed
7263    /// dispensable token.
7264    ///
7265    /// `SetDispensable` on an already-dispensable token is idempotent.
7266    pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
        // Thin convenience wrapper: delegates to the trait impl, which sends
        // the one-way SetDispensable message (no payload).
7267        BufferCollectionTokenProxyInterface::r#set_dispensable(self)
7268    }
7269
7270    /// Create a logical OR among a set of tokens, called a
7271    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
7272    ///
7273    /// Most sysmem clients and many participants don't need to care about this
7274    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
7275    /// a participant wants to attempt to include one set of delegate
7276    /// participants, but if constraints don't combine successfully that way,
7277    /// fall back to a different (possibly overlapping) set of delegate
7278    /// participants, and/or fall back to a less demanding strategy (in terms of
7279    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
7280    /// across all involved delegate participants). In such cases, a
7281    /// `BufferCollectionTokenGroup` is useful.
7282    ///
7283    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
7284    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s).  The child tokens
7285    /// which are not selected during aggregation will fail (close), which a
7286    /// potential participant should notice when their `BufferCollection`
7287    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
7288    /// clean up the speculative usage that didn't end up happening (this is
7289    /// similar to a normal `BufferCollection` server end closing on failure to
7290    /// allocate a logical buffer collection or later async failure of a buffer
7291    /// collection).
7292    ///
7293    /// See comments on protocol `BufferCollectionTokenGroup`.
7294    ///
7295    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
7296    /// applied to the whole group can be achieved with a
7297    /// `BufferCollectionToken` for this purpose as a direct parent of the
7298    /// `BufferCollectionTokenGroup`.
7299    ///
7300    /// All table fields are currently required.
7301    ///
7302    /// + request `group_request` The server end of a
7303    ///   `BufferCollectionTokenGroup` channel to be served by sysmem.
7304    pub fn r#create_buffer_collection_token_group(
7305        &self,
7306        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
7307    ) -> Result<(), fidl::Error> {
        // Thin convenience wrapper: forwards the request table to the trait
        // impl, which sends the one-way CreateBufferCollectionTokenGroup
        // message.
7308        BufferCollectionTokenProxyInterface::r#create_buffer_collection_token_group(self, payload)
7309    }
7310}
7311
// Client-side wire implementation of the BufferCollectionToken protocol.
// Each method either fires a one-way message (`self.client.send`) or starts a
// two-way call (`send_query_and_decode`) whose response is decoded by a local
// `_decode` helper. The hex literal in each call is the method's FIDL ordinal;
// it must match the value used in the server-side dispatch. All messages are
// sent with `DynamicFlags::FLEXIBLE` (open-protocol / flexible-method ABI).
7312impl BufferCollectionTokenProxyInterface for BufferCollectionTokenProxy {
7313    type SyncResponseFut =
7314        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    // Two-way Sync: empty request, empty (flexible) response.
7315    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the Sync response body; the ordinal here must match the one
        // passed to `send_query_and_decode` below.
7316        fn _decode(
7317            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
7318        ) -> Result<(), fidl::Error> {
7319            let _response = fidl::client::decode_transaction_body::<
7320                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
7321                fidl::encoding::DefaultFuchsiaResourceDialect,
7322                0x11ac2555cf575b54,
7323            >(_buf?)?
7324            .into_result::<BufferCollectionTokenMarker>("sync")?;
7325            Ok(_response)
7326        }
7327        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
7328            (),
7329            0x11ac2555cf575b54,
7330            fidl::encoding::DynamicFlags::FLEXIBLE,
7331            _decode,
7332        )
7333    }
7334
    // One-way Release: empty payload.
7335    fn r#release(&self) -> Result<(), fidl::Error> {
7336        self.client.send::<fidl::encoding::EmptyPayload>(
7337            (),
7338            0x6a5cae7d6d6e04c6,
7339            fidl::encoding::DynamicFlags::FLEXIBLE,
7340        )
7341    }
7342
    // One-way SetName: request table passed by reference.
7343    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
7344        self.client.send::<NodeSetNameRequest>(
7345            payload,
7346            0xb41f1624f48c1e9,
7347            fidl::encoding::DynamicFlags::FLEXIBLE,
7348        )
7349    }
7350
    // One-way SetDebugClientInfo.
7351    fn r#set_debug_client_info(
7352        &self,
7353        mut payload: &NodeSetDebugClientInfoRequest,
7354    ) -> Result<(), fidl::Error> {
7355        self.client.send::<NodeSetDebugClientInfoRequest>(
7356            payload,
7357            0x5cde8914608d99b1,
7358            fidl::encoding::DynamicFlags::FLEXIBLE,
7359        )
7360    }
7361
    // One-way SetDebugTimeoutLogDeadline.
7362    fn r#set_debug_timeout_log_deadline(
7363        &self,
7364        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
7365    ) -> Result<(), fidl::Error> {
7366        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
7367            payload,
7368            0x716b0af13d5c0806,
7369            fidl::encoding::DynamicFlags::FLEXIBLE,
7370        )
7371    }
7372
    // One-way SetVerboseLogging: empty payload.
7373    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
7374        self.client.send::<fidl::encoding::EmptyPayload>(
7375            (),
7376            0x5209c77415b4dfad,
7377            fidl::encoding::DynamicFlags::FLEXIBLE,
7378        )
7379    }
7380
7381    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
7382        NodeGetNodeRefResponse,
7383        fidl::encoding::DefaultFuchsiaResourceDialect,
7384    >;
    // Two-way GetNodeRef: empty request, table response.
7385    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
7386        fn _decode(
7387            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
7388        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
7389            let _response = fidl::client::decode_transaction_body::<
7390                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
7391                fidl::encoding::DefaultFuchsiaResourceDialect,
7392                0x5b3d0e51614df053,
7393            >(_buf?)?
7394            .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
7395            Ok(_response)
7396        }
7397        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
7398            (),
7399            0x5b3d0e51614df053,
7400            fidl::encoding::DynamicFlags::FLEXIBLE,
7401            _decode,
7402        )
7403    }
7404
7405    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
7406        NodeIsAlternateForResult,
7407        fidl::encoding::DefaultFuchsiaResourceDialect,
7408    >;
    // Two-way IsAlternateFor: uses a flexible *result* type, so the decoded
    // value is a Result (success table or protocol error).
7409    fn r#is_alternate_for(
7410        &self,
7411        mut payload: NodeIsAlternateForRequest,
7412    ) -> Self::IsAlternateForResponseFut {
7413        fn _decode(
7414            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
7415        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
7416            let _response = fidl::client::decode_transaction_body::<
7417                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
7418                fidl::encoding::DefaultFuchsiaResourceDialect,
7419                0x3a58e00157e0825,
7420            >(_buf?)?
7421            .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
7422            Ok(_response.map(|x| x))
7423        }
7424        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
7425            &mut payload,
7426            0x3a58e00157e0825,
7427            fidl::encoding::DynamicFlags::FLEXIBLE,
7428            _decode,
7429        )
7430    }
7431
7432    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
7433        NodeGetBufferCollectionIdResponse,
7434        fidl::encoding::DefaultFuchsiaResourceDialect,
7435    >;
    // Two-way GetBufferCollectionId: empty request, table response.
7436    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
7437        fn _decode(
7438            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
7439        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
7440            let _response = fidl::client::decode_transaction_body::<
7441                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
7442                fidl::encoding::DefaultFuchsiaResourceDialect,
7443                0x77d19a494b78ba8c,
7444            >(_buf?)?
7445            .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
7446            Ok(_response)
7447        }
7448        self.client.send_query_and_decode::<
7449            fidl::encoding::EmptyPayload,
7450            NodeGetBufferCollectionIdResponse,
7451        >(
7452            (),
7453            0x77d19a494b78ba8c,
7454            fidl::encoding::DynamicFlags::FLEXIBLE,
7455            _decode,
7456        )
7457    }
7458
    // One-way SetWeak: empty payload.
7459    fn r#set_weak(&self) -> Result<(), fidl::Error> {
7460        self.client.send::<fidl::encoding::EmptyPayload>(
7461            (),
7462            0x22dd3ea514eeffe1,
7463            fidl::encoding::DynamicFlags::FLEXIBLE,
7464        )
7465    }
7466
    // One-way SetWeakOk: payload taken by value (may carry resources).
7467    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
7468        self.client.send::<NodeSetWeakOkRequest>(
7469            &mut payload,
7470            0x38a44fc4d7724be9,
7471            fidl::encoding::DynamicFlags::FLEXIBLE,
7472        )
7473    }
7474
    // One-way AttachNodeTracking: payload taken by value (may carry resources).
7475    fn r#attach_node_tracking(
7476        &self,
7477        mut payload: NodeAttachNodeTrackingRequest,
7478    ) -> Result<(), fidl::Error> {
7479        self.client.send::<NodeAttachNodeTrackingRequest>(
7480            &mut payload,
7481            0x3f22f2a293d3cdac,
7482            fidl::encoding::DynamicFlags::FLEXIBLE,
7483        )
7484    }
7485
7486    type DuplicateSyncResponseFut = fidl::client::QueryResponseFut<
7487        BufferCollectionTokenDuplicateSyncResponse,
7488        fidl::encoding::DefaultFuchsiaResourceDialect,
7489    >;
    // Two-way DuplicateSync: table request by reference, table response.
7490    fn r#duplicate_sync(
7491        &self,
7492        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
7493    ) -> Self::DuplicateSyncResponseFut {
7494        fn _decode(
7495            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
7496        ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
7497            let _response = fidl::client::decode_transaction_body::<
7498                fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
7499                fidl::encoding::DefaultFuchsiaResourceDialect,
7500                0x1c1af9919d1ca45c,
7501            >(_buf?)?
7502            .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
7503            Ok(_response)
7504        }
7505        self.client.send_query_and_decode::<
7506            BufferCollectionTokenDuplicateSyncRequest,
7507            BufferCollectionTokenDuplicateSyncResponse,
7508        >(
7509            payload,
7510            0x1c1af9919d1ca45c,
7511            fidl::encoding::DynamicFlags::FLEXIBLE,
7512            _decode,
7513        )
7514    }
7515
    // One-way Duplicate: payload taken by value (carries the server end).
7516    fn r#duplicate(
7517        &self,
7518        mut payload: BufferCollectionTokenDuplicateRequest,
7519    ) -> Result<(), fidl::Error> {
7520        self.client.send::<BufferCollectionTokenDuplicateRequest>(
7521            &mut payload,
7522            0x73e78f92ee7fb887,
7523            fidl::encoding::DynamicFlags::FLEXIBLE,
7524        )
7525    }
7526
    // One-way SetDispensable: empty payload.
7527    fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
7528        self.client.send::<fidl::encoding::EmptyPayload>(
7529            (),
7530            0x228acf979254df8b,
7531            fidl::encoding::DynamicFlags::FLEXIBLE,
7532        )
7533    }
7534
    // One-way CreateBufferCollectionTokenGroup: payload taken by value
    // (carries the group server end).
7535    fn r#create_buffer_collection_token_group(
7536        &self,
7537        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
7538    ) -> Result<(), fidl::Error> {
7539        self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
7540            &mut payload,
7541            0x30f8d48e77bd36f2,
7542            fidl::encoding::DynamicFlags::FLEXIBLE,
7543        )
7544    }
7545}
7546
/// Stream of incoming events on a `BufferCollectionToken` client channel.
/// Yielded items are produced by [`BufferCollectionTokenEvent::decode`].
7547pub struct BufferCollectionTokenEventStream {
    // Raw event receiver; messages are decoded lazily in `poll_next`.
7548    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
7549}
7550
// The stream holds no self-referential data, so it is safe to mark `Unpin`.
7551impl std::marker::Unpin for BufferCollectionTokenEventStream {}
7552
// Fused-ness is delegated to the underlying event receiver: once it reports
// terminated, this stream will never yield again.
7553impl futures::stream::FusedStream for BufferCollectionTokenEventStream {
7554    fn is_terminated(&self) -> bool {
7555        self.event_receiver.is_terminated()
7556    }
7557}
7558
7559impl futures::Stream for BufferCollectionTokenEventStream {
7560    type Item = Result<BufferCollectionTokenEvent, fidl::Error>;
7561
    /// Polls the inner receiver for the next raw message buffer and decodes
    /// it into a [`BufferCollectionTokenEvent`]. Receiver errors propagate
    /// via the `?` inside `ready!`; end-of-stream maps to `Ready(None)`.
7562    fn poll_next(
7563        mut self: std::pin::Pin<&mut Self>,
7564        cx: &mut std::task::Context<'_>,
7565    ) -> std::task::Poll<Option<Self::Item>> {
7566        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
7567            &mut self.event_receiver,
7568            cx
7569        )?) {
7570            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenEvent::decode(buf))),
7571            None => std::task::Poll::Ready(None),
7572        }
7573    }
7574}
7575
/// An event received from a `BufferCollectionToken` server. This protocol
/// currently defines no known events, so the only variant is the flexible
/// unknown-event catch-all.
7576#[derive(Debug)]
7577pub enum BufferCollectionTokenEvent {
7578    #[non_exhaustive]
7579    _UnknownEvent {
7580        /// Ordinal of the event that was sent.
7581        ordinal: u64,
7582    },
7583}
7584
7585impl BufferCollectionTokenEvent {
7586    /// Decodes a message buffer as a [`BufferCollectionTokenEvent`].
    ///
    /// Events have no transaction id (checked via `debug_assert`). Any
    /// ordinal whose header carries the FLEXIBLE dynamic flag is accepted as
    /// `_UnknownEvent`; a strict unknown ordinal is a protocol error.
7587    fn decode(
7588        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
7589    ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
7590        let (bytes, _handles) = buf.split_mut();
7591        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events are unsolicited, so tx_id must be 0.
7592        debug_assert_eq!(tx_header.tx_id, 0);
7593        match tx_header.ordinal {
7594            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
7595                Ok(BufferCollectionTokenEvent::_UnknownEvent { ordinal: tx_header.ordinal })
7596            }
7597            _ => Err(fidl::Error::UnknownOrdinal {
7598                ordinal: tx_header.ordinal,
7599                protocol_name:
7600                    <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
7601            }),
7602        }
7603    }
7604}
7605
7606/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionToken.
7607pub struct BufferCollectionTokenRequestStream {
    // Shared server state (channel + shutdown bookkeeping); also cloned into
    // each control handle handed out with a request.
7608    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has yielded `None`; polling afterwards panics.
7609    is_terminated: bool,
7610}
7611
// The stream holds no self-referential data, so it is safe to mark `Unpin`.
7612impl std::marker::Unpin for BufferCollectionTokenRequestStream {}
7613
// Fused-ness is tracked locally via the `is_terminated` flag set by
// `poll_next` when the channel shuts down or the peer closes.
7614impl futures::stream::FusedStream for BufferCollectionTokenRequestStream {
7615    fn is_terminated(&self) -> bool {
7616        self.is_terminated
7617    }
7618}
7619
7620impl fidl::endpoints::RequestStream for BufferCollectionTokenRequestStream {
7621    type Protocol = BufferCollectionTokenMarker;
7622    type ControlHandle = BufferCollectionTokenControlHandle;
7623
    /// Wraps an async channel in server-side serving state; the stream
    /// starts un-terminated.
7624    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
7625        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
7626    }
7627
    /// Returns a control handle sharing this stream's serving state.
7628    fn control_handle(&self) -> Self::ControlHandle {
7629        BufferCollectionTokenControlHandle { inner: self.inner.clone() }
7630    }
7631
    /// Deconstructs into the shared serving state and the terminated flag.
7632    fn into_inner(
7633        self,
7634    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
7635    {
7636        (self.inner, self.is_terminated)
7637    }
7638
    /// Rebuilds a stream from parts previously returned by `into_inner`.
7639    fn from_inner(
7640        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
7641        is_terminated: bool,
7642    ) -> Self {
7643        Self { inner, is_terminated }
7644    }
7645}
7646
7647impl futures::Stream for BufferCollectionTokenRequestStream {
7648    type Item = Result<BufferCollectionTokenRequest, fidl::Error>;
7649
7650    fn poll_next(
7651        mut self: std::pin::Pin<&mut Self>,
7652        cx: &mut std::task::Context<'_>,
7653    ) -> std::task::Poll<Option<Self::Item>> {
7654        let this = &mut *self;
7655        if this.inner.check_shutdown(cx) {
7656            this.is_terminated = true;
7657            return std::task::Poll::Ready(None);
7658        }
7659        if this.is_terminated {
7660            panic!("polled BufferCollectionTokenRequestStream after completion");
7661        }
7662        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
7663            |bytes, handles| {
7664                match this.inner.channel().read_etc(cx, bytes, handles) {
7665                    std::task::Poll::Ready(Ok(())) => {}
7666                    std::task::Poll::Pending => return std::task::Poll::Pending,
7667                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
7668                        this.is_terminated = true;
7669                        return std::task::Poll::Ready(None);
7670                    }
7671                    std::task::Poll::Ready(Err(e)) => {
7672                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
7673                            e.into(),
7674                        ))));
7675                    }
7676                }
7677
7678                // A message has been received from the channel
7679                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
7680
7681                std::task::Poll::Ready(Some(match header.ordinal {
7682                0x11ac2555cf575b54 => {
7683                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7684                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7685                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7686                    let control_handle = BufferCollectionTokenControlHandle {
7687                        inner: this.inner.clone(),
7688                    };
7689                    Ok(BufferCollectionTokenRequest::Sync {
7690                        responder: BufferCollectionTokenSyncResponder {
7691                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7692                            tx_id: header.tx_id,
7693                        },
7694                    })
7695                }
7696                0x6a5cae7d6d6e04c6 => {
7697                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7698                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7699                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7700                    let control_handle = BufferCollectionTokenControlHandle {
7701                        inner: this.inner.clone(),
7702                    };
7703                    Ok(BufferCollectionTokenRequest::Release {
7704                        control_handle,
7705                    })
7706                }
7707                0xb41f1624f48c1e9 => {
7708                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7709                    let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7710                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
7711                    let control_handle = BufferCollectionTokenControlHandle {
7712                        inner: this.inner.clone(),
7713                    };
7714                    Ok(BufferCollectionTokenRequest::SetName {payload: req,
7715                        control_handle,
7716                    })
7717                }
7718                0x5cde8914608d99b1 => {
7719                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7720                    let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7721                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
7722                    let control_handle = BufferCollectionTokenControlHandle {
7723                        inner: this.inner.clone(),
7724                    };
7725                    Ok(BufferCollectionTokenRequest::SetDebugClientInfo {payload: req,
7726                        control_handle,
7727                    })
7728                }
7729                0x716b0af13d5c0806 => {
7730                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7731                    let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7732                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
7733                    let control_handle = BufferCollectionTokenControlHandle {
7734                        inner: this.inner.clone(),
7735                    };
7736                    Ok(BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {payload: req,
7737                        control_handle,
7738                    })
7739                }
7740                0x5209c77415b4dfad => {
7741                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7742                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7743                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7744                    let control_handle = BufferCollectionTokenControlHandle {
7745                        inner: this.inner.clone(),
7746                    };
7747                    Ok(BufferCollectionTokenRequest::SetVerboseLogging {
7748                        control_handle,
7749                    })
7750                }
7751                0x5b3d0e51614df053 => {
7752                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7753                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7754                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7755                    let control_handle = BufferCollectionTokenControlHandle {
7756                        inner: this.inner.clone(),
7757                    };
7758                    Ok(BufferCollectionTokenRequest::GetNodeRef {
7759                        responder: BufferCollectionTokenGetNodeRefResponder {
7760                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7761                            tx_id: header.tx_id,
7762                        },
7763                    })
7764                }
7765                0x3a58e00157e0825 => {
7766                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7767                    let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7768                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
7769                    let control_handle = BufferCollectionTokenControlHandle {
7770                        inner: this.inner.clone(),
7771                    };
7772                    Ok(BufferCollectionTokenRequest::IsAlternateFor {payload: req,
7773                        responder: BufferCollectionTokenIsAlternateForResponder {
7774                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7775                            tx_id: header.tx_id,
7776                        },
7777                    })
7778                }
7779                0x77d19a494b78ba8c => {
7780                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7781                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7782                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7783                    let control_handle = BufferCollectionTokenControlHandle {
7784                        inner: this.inner.clone(),
7785                    };
7786                    Ok(BufferCollectionTokenRequest::GetBufferCollectionId {
7787                        responder: BufferCollectionTokenGetBufferCollectionIdResponder {
7788                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7789                            tx_id: header.tx_id,
7790                        },
7791                    })
7792                }
7793                0x22dd3ea514eeffe1 => {
7794                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7795                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7796                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7797                    let control_handle = BufferCollectionTokenControlHandle {
7798                        inner: this.inner.clone(),
7799                    };
7800                    Ok(BufferCollectionTokenRequest::SetWeak {
7801                        control_handle,
7802                    })
7803                }
7804                0x38a44fc4d7724be9 => {
7805                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7806                    let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7807                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
7808                    let control_handle = BufferCollectionTokenControlHandle {
7809                        inner: this.inner.clone(),
7810                    };
7811                    Ok(BufferCollectionTokenRequest::SetWeakOk {payload: req,
7812                        control_handle,
7813                    })
7814                }
7815                0x3f22f2a293d3cdac => {
7816                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7817                    let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7818                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
7819                    let control_handle = BufferCollectionTokenControlHandle {
7820                        inner: this.inner.clone(),
7821                    };
7822                    Ok(BufferCollectionTokenRequest::AttachNodeTracking {payload: req,
7823                        control_handle,
7824                    })
7825                }
7826                0x1c1af9919d1ca45c => {
7827                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7828                    let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7829                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateSyncRequest>(&header, _body_bytes, handles, &mut req)?;
7830                    let control_handle = BufferCollectionTokenControlHandle {
7831                        inner: this.inner.clone(),
7832                    };
7833                    Ok(BufferCollectionTokenRequest::DuplicateSync {payload: req,
7834                        responder: BufferCollectionTokenDuplicateSyncResponder {
7835                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7836                            tx_id: header.tx_id,
7837                        },
7838                    })
7839                }
7840                0x73e78f92ee7fb887 => {
7841                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7842                    let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7843                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateRequest>(&header, _body_bytes, handles, &mut req)?;
7844                    let control_handle = BufferCollectionTokenControlHandle {
7845                        inner: this.inner.clone(),
7846                    };
7847                    Ok(BufferCollectionTokenRequest::Duplicate {payload: req,
7848                        control_handle,
7849                    })
7850                }
7851                0x228acf979254df8b => {
7852                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7853                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7854                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7855                    let control_handle = BufferCollectionTokenControlHandle {
7856                        inner: this.inner.clone(),
7857                    };
7858                    Ok(BufferCollectionTokenRequest::SetDispensable {
7859                        control_handle,
7860                    })
7861                }
7862                0x30f8d48e77bd36f2 => {
7863                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7864                    let mut req = fidl::new_empty!(BufferCollectionTokenCreateBufferCollectionTokenGroupRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7865                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(&header, _body_bytes, handles, &mut req)?;
7866                    let control_handle = BufferCollectionTokenControlHandle {
7867                        inner: this.inner.clone(),
7868                    };
7869                    Ok(BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {payload: req,
7870                        control_handle,
7871                    })
7872                }
7873                _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
7874                    Ok(BufferCollectionTokenRequest::_UnknownMethod {
7875                        ordinal: header.ordinal,
7876                        control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
7877                        method_type: fidl::MethodType::OneWay,
7878                    })
7879                }
7880                _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
7881                    this.inner.send_framework_err(
7882                        fidl::encoding::FrameworkErr::UnknownMethod,
7883                        header.tx_id,
7884                        header.ordinal,
7885                        header.dynamic_flags(),
7886                        (bytes, handles),
7887                    )?;
7888                    Ok(BufferCollectionTokenRequest::_UnknownMethod {
7889                        ordinal: header.ordinal,
7890                        control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
7891                        method_type: fidl::MethodType::TwoWay,
7892                    })
7893                }
7894                _ => Err(fidl::Error::UnknownOrdinal {
7895                    ordinal: header.ordinal,
7896                    protocol_name: <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
7897                }),
7898            }))
7899            },
7900        )
7901    }
7902}
7903
7904/// A [`fuchsia.sysmem2/BufferCollectionToken`] is not a buffer collection, but
7905/// rather is a way to identify a specific potential shared buffer collection,
7906/// and a way to distribute that potential shared buffer collection to
7907/// additional participants prior to the buffer collection allocating any
7908/// buffers.
7909///
7910/// Epitaphs are not used in this protocol.
7911///
7912/// We use a channel for the `BufferCollectionToken` instead of a single
7913/// `eventpair` (pair) because this way we can detect error conditions like a
7914/// participant failing mid-create.
7915#[derive(Debug)]
7916pub enum BufferCollectionTokenRequest {
7917    /// Ensure that previous messages have been received server side. This is
7918    /// particularly useful after previous messages that created new tokens,
7919    /// because a token must be known to the sysmem server before sending the
7920    /// token to another participant.
7921    ///
7922    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
7923    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
7924    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
7925    /// to mitigate the possibility of a hostile/fake
7926    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
7927    /// Another way is to pass the token to
7928    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
7929    /// the token as part of exchanging it for a
7930    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
7931    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
7932    /// of stalling.
7933    ///
7934    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
7935    /// and then starting and completing a `Sync`, it's then safe to send the
7936    /// `BufferCollectionToken` client ends to other participants knowing the
7937    /// server will recognize the tokens when they're sent by the other
7938    /// participants to sysmem in a
7939    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
7940    /// efficient way to create tokens while avoiding unnecessary round trips.
7941    ///
7942    /// Other options include waiting for each
7943    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
7944    /// individually (using separate call to `Sync` after each), or calling
7945    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
7946    /// converted to a `BufferCollection` via
7947    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
7948    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
7949    /// the sync step and can create multiple tokens at once.
7950    Sync { responder: BufferCollectionTokenSyncResponder },
7951    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
7952    ///
7953    /// Normally a participant will convert a `BufferCollectionToken` into a
7954    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
7955    /// `Release` via the token (and then close the channel immediately or
7956    /// shortly later in response to server closing the server end), which
7957    /// avoids causing buffer collection failure. Without a prior `Release`,
7958    /// closing the `BufferCollectionToken` client end will cause buffer
7959    /// collection failure.
7960    ///
7961    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
7962    ///
7963    /// By default the server handles unexpected closure of a
7964    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
7965    /// first) by failing the buffer collection. Partly this is to expedite
7966    /// closing VMO handles to reclaim memory when any participant fails. If a
7967    /// participant would like to cleanly close a `BufferCollection` without
7968    /// causing buffer collection failure, the participant can send `Release`
7969    /// before closing the `BufferCollection` client end. The `Release` can
7970    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
7971    /// buffer collection won't require constraints from this node in order to
7972    /// allocate. If after `SetConstraints`, the constraints are retained and
7973    /// aggregated, despite the lack of `BufferCollection` connection at the
7974    /// time of constraints aggregation.
7975    ///
7976    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
7977    ///
7978    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
7979    /// end (without `Release` first) will trigger failure of the buffer
7980    /// collection. To close a `BufferCollectionTokenGroup` channel without
7981    /// failing the buffer collection, ensure that AllChildrenPresent() has been
7982    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
7983    /// client end.
7984    ///
7985    /// If `Release` occurs before
7986    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
7987    /// buffer collection will fail (triggered by reception of `Release` without
7988    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
7989    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
7990    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
7991    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
7992    /// close requires `AllChildrenPresent` (if not already sent), then
7993    /// `Release`, then close client end.
7994    ///
7995    /// If `Release` occurs after `AllChildrenPresent`, the children and all
7996    /// their constraints remain intact (just as they would if the
7997    /// `BufferCollectionTokenGroup` channel had remained open), and the client
7998    /// end close doesn't trigger buffer collection failure.
7999    ///
8000    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
8001    ///
8002    /// For brevity, the per-channel-protocol paragraphs above ignore the
8003    /// separate failure domain created by
8004    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
8005    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
8006    /// unexpectedly closes (without `Release` first) and that client end is
8007    /// under a failure domain, instead of failing the whole buffer collection,
8008    /// the failure domain is failed, but the buffer collection itself is
8009    /// isolated from failure of the failure domain. Such failure domains can be
8010    /// nested, in which case only the inner-most failure domain in which the
8011    /// `Node` resides fails.
8012    Release { control_handle: BufferCollectionTokenControlHandle },
8013    /// Set a name for VMOs in this buffer collection.
8014    ///
8015    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
8016    /// will be truncated to fit. The name of the vmo will be suffixed with the
8017    /// buffer index within the collection (if the suffix fits within
8018    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
8019    /// listed in the inspect data.
8020    ///
8021    /// The name only affects VMOs allocated after the name is set; this call
8022    /// does not rename existing VMOs. If multiple clients set different names
8023    /// then the larger priority value will win. Setting a new name with the
8024    /// same priority as a prior name doesn't change the name.
8025    ///
8026    /// All table fields are currently required.
8027    ///
8028    /// + request `priority` The name is only set if this is the first `SetName`
8029    ///   or if `priority` is greater than any previous `priority` value in
8030    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
8031    /// + request `name` The name for VMOs created under this buffer collection.
8032    SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenControlHandle },
8033    /// Set information about the current client that can be used by sysmem to
8034    /// help diagnose leaking memory and allocation stalls waiting for a
8035    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
8036    ///
8037    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
8038    /// `Node`(s) derived from this `Node`, unless overridden by
8039    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
8040    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
8041    ///
8042    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
8043    /// `Allocator` is the most efficient way to ensure that all
8044    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
8045    /// set, and is also more efficient than separately sending the same debug
8046    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
8047    /// created [`fuchsia.sysmem2/Node`].
8048    ///
8049    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
8050    /// indicate which client is closing their channel first, leading to subtree
8051    /// failure (which can be normal if the purpose of the subtree is over, but
8052    /// if happening earlier than expected, the client-channel-specific name can
8053    /// help diagnose where the failure is first coming from, from sysmem's
8054    /// point of view).
8055    ///
8056    /// All table fields are currently required.
8057    ///
8058    /// + request `name` This can be an arbitrary string, but the current
8059    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
8060    /// + request `id` This can be an arbitrary id, but the current process ID
8061    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
8062    SetDebugClientInfo {
8063        payload: NodeSetDebugClientInfoRequest,
8064        control_handle: BufferCollectionTokenControlHandle,
8065    },
8066    /// Sysmem logs a warning if sysmem hasn't seen
8067    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
8068    /// within 5 seconds after creation of a new collection.
8069    ///
8070    /// Clients can call this method to change when the log is printed. If
8071    /// multiple clients set the deadline, it's unspecified which deadline will
8072    /// take effect.
8073    ///
8074    /// In most cases the default works well.
8075    ///
8076    /// All table fields are currently required.
8077    ///
8078    /// + request `deadline` The time at which sysmem will start trying to log
8079    ///   the warning, unless all constraints are with sysmem by then.
8080    SetDebugTimeoutLogDeadline {
8081        payload: NodeSetDebugTimeoutLogDeadlineRequest,
8082        control_handle: BufferCollectionTokenControlHandle,
8083    },
8084    /// This enables verbose logging for the buffer collection.
8085    ///
8086    /// Verbose logging includes constraints set via
8087    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
8088    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
8089    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
8090    /// the tree of `Node`(s).
8091    ///
8092    /// Normally sysmem prints only a single line complaint when aggregation
8093    /// fails, with just the specific detailed reason that aggregation failed,
8094    /// with little surrounding context.  While this is often enough to diagnose
8095    /// a problem if only a small change was made and everything was working
8096    /// before the small change, it's often not particularly helpful for getting
8097    /// a new buffer collection to work for the first time.  Especially with
8098    /// more complex trees of nodes, involving things like
8099    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
8100    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
8101    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
8102    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
8103    /// looks like and why it's failing a logical allocation, or why a tree or
8104    /// subtree is failing sooner than expected.
8105    ///
8106    /// The intent of the extra logging is to be acceptable from a performance
8107    /// point of view, under the assumption that verbose logging is only enabled
8108    /// on a low number of buffer collections. If we're not tracking down a bug,
8109    /// we shouldn't send this message.
8110    SetVerboseLogging { control_handle: BufferCollectionTokenControlHandle },
8111    /// This gets a handle that can be used as a parameter to
8112    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
8113    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
8114    /// client obtained this handle from this `Node`.
8115    ///
8116    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
8117    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
8118    /// despite the two calls typically being on different channels.
8119    ///
8120    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
8121    ///
8122    /// All table fields are currently required.
8123    ///
8124    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
8125    ///   different `Node` channel, to prove that the client obtained the handle
8126    ///   from this `Node`.
8127    GetNodeRef { responder: BufferCollectionTokenGetNodeRefResponder },
8128    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
8129    /// rooted at a different child token of a common parent
8130    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
8131    /// passed-in `node_ref`.
8132    ///
8133    /// This call is for assisting with admission control de-duplication, and
8134    /// with debugging.
8135    ///
8136    /// The `node_ref` must be obtained using
8137    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
8138    ///
8139    /// The `node_ref` can be a duplicated handle; it's not necessary to call
8140    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
8141    ///
8142    /// If a calling token may not actually be a valid token at all due to a
8143    /// potentially hostile/untrusted provider of the token, call
8144    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8145    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
8146    /// never responds due to a calling token not being a real token (not really
8147    /// talking to sysmem).  Another option is to call
8148    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
8149    /// which also validates the token along with converting it to a
8150    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
8151    ///
8152    /// All table fields are currently required.
8153    ///
8154    /// - response `is_alternate`
8155    ///   - true: The first parent node in common between the calling node and
8156    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
8157    ///     that the calling `Node` and the `node_ref` `Node` will not have both
8158    ///     their constraints apply - rather sysmem will choose one or the other
8159    ///     of the constraints - never both.  This is because only one child of
8160    ///     a `BufferCollectionTokenGroup` is selected during logical
8161    ///     allocation, with only that one child's subtree contributing to
8162    ///     constraints aggregation.
8163    ///   - false: The first parent node in common between the calling `Node`
8164    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
8165    ///     Currently, this means the first parent node in common is a
8166    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
8167    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
8168    ///     `Node` may have both their constraints apply during constraints
8169    ///     aggregation of the logical allocation, if both `Node`(s) are
8170    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
8171    ///     this case, there is no `BufferCollectionTokenGroup` that will
8172    ///     directly prevent the two `Node`(s) from both being selected and
8173    ///     their constraints both aggregated, but even when false, one or both
8174    ///     `Node`(s) may still be eliminated from consideration if one or both
8175    ///     `Node`(s) has a direct or indirect parent
8176    ///     `BufferCollectionTokenGroup` which selects a child subtree other
8177    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
8178    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
8179    ///   associated with the same buffer collection as the calling `Node`.
8180    ///   Another reason for this error is if the `node_ref` is an
8181    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
8182    ///   a real `node_ref` obtained from `GetNodeRef`.
8183    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
8184    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
8185    ///   the needed rights expected on a real `node_ref`.
8186    /// * No other failing status codes are returned by this call.  However,
8187    ///   sysmem may add additional codes in future, so the client should have
8188    ///   sensible default handling for any failing status code.
8189    IsAlternateFor {
8190        payload: NodeIsAlternateForRequest,
8191        responder: BufferCollectionTokenIsAlternateForResponder,
8192    },
8193    /// Get the buffer collection ID. This ID is also available from
8194    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
8195    /// within the collection).
8196    ///
8197    /// This call is mainly useful in situations where we can't convey a
8198    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
8199    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
8200    /// handle, which can be joined back up with a `BufferCollection` client end
8201    /// that was created via a different path. Prefer to convey a
8202    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
8203    ///
8204    /// Trusting a `buffer_collection_id` value from a source other than sysmem
8205    /// is analogous to trusting a koid value from a source other than zircon.
8206    /// Both should be avoided unless really necessary, and both require
8207    /// caution. In some situations it may be reasonable to refer to a
8208    /// pre-established `BufferCollection` by `buffer_collection_id` via a
8209    /// protocol for efficiency reasons, but an incoming value purporting to be
8210    /// a `buffer_collection_id` is not sufficient alone to justify granting the
8211    /// sender of the `buffer_collection_id` any capability. The sender must
8212    /// first prove to a receiver that the sender has/had a VMO or has/had a
8213    /// `BufferCollectionToken` to the same collection by sending a handle that
8214    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
8215    /// `buffer_collection_id` value. The receiver should take care to avoid
8216    /// assuming that a sender had a `BufferCollectionToken` in cases where the
8217    /// sender has only proven that the sender had a VMO.
8218    ///
8219    /// - response `buffer_collection_id` This ID is unique per buffer
8220    ///   collection per boot. Each buffer is uniquely identified by the
8221    ///   `buffer_collection_id` and `buffer_index` together.
8222    GetBufferCollectionId { responder: BufferCollectionTokenGetBufferCollectionIdResponder },
8223    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
8224    /// created after this message to weak, which means that a client's `Node`
8225    /// client end (or a child created after this message) is not alone
8226    /// sufficient to keep allocated VMOs alive.
8227    ///
8228    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
8229    /// `close_weak_asap`.
8230    ///
8231    /// This message is only permitted before the `Node` becomes ready for
8232    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
8233    ///   * `BufferCollectionToken`: any time
8234    ///   * `BufferCollection`: before `SetConstraints`
8235    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
8236    ///
8237    /// Currently, no conversion from strong `Node` to weak `Node` after ready
8238    /// for allocation is provided, but a client can simulate that by creating
8239    /// an additional `Node` before allocation and setting that additional
8240    /// `Node` to weak, and then potentially at some point later sending
8241    /// `Release` and closing the client end of the client's strong `Node`, but
8242    /// keeping the client's weak `Node`.
8243    ///
8244    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
8245    /// collection failure (all `Node` client end(s) will see
8246    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
8247    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
8248    /// this situation until all `Node`(s) are ready for allocation. For initial
8249    /// allocation to succeed, at least one strong `Node` is required to exist
8250    /// at allocation time, but after that client receives VMO handles, that
8251    /// client can `BufferCollection.Release` and close the client end without
8252    /// causing this type of failure.
8253    ///
8254    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
8255    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
8256    /// separately as appropriate.
8257    SetWeak { control_handle: BufferCollectionTokenControlHandle },
8258    /// This indicates to sysmem that the client is prepared to pay attention to
8259    /// `close_weak_asap`.
8260    ///
8261    /// If sent, this message must be before
8262    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
8263    ///
8264    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
8265    /// send this message before `WaitForAllBuffersAllocated`, or a parent
8266    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
8267    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
8268    /// trigger buffer collection failure.
8269    ///
8270    /// This message is necessary because weak sysmem VMOs have not always been
8271    /// a thing, so older clients are not aware of the need to pay attention to
8272    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
8273    /// sysmem weak VMO handles asap. By having this message and requiring
8274    /// participants to indicate their acceptance of this aspect of the overall
8275    /// protocol, we avoid situations where an older client is delivered a weak
8276    /// VMO without any way for sysmem to get that VMO to close quickly later
8277    /// (and on a per-buffer basis).
8278    ///
8279    /// A participant that doesn't handle `close_weak_asap` and also doesn't
8280    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
8281    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
8282    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
8283    /// same participant has a child/delegate which does retrieve VMOs, that
8284    /// child/delegate will need to send `SetWeakOk` before
8285    /// `WaitForAllBuffersAllocated`.
8286    ///
8287    /// + request `for_child_nodes_also` If present and true, this means direct
8288    ///   child nodes of this node created after this message plus all
8289    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
8290    ///   those nodes. Any child node of this node that was created before this
8291    ///   message is not included. This setting is "sticky" in the sense that a
8292    ///   subsequent `SetWeakOk` without this bool set to true does not reset
8293    ///   the server-side bool. If this creates a problem for a participant, a
8294    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
8295    ///   tokens instead, as appropriate. A participant should only set
8296    ///   `for_child_nodes_also` true if the participant can really promise to
8297    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
8298    ///   weak VMO handles held by participants holding the corresponding child
8299    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
8300    ///   which are using sysmem(1) can be weak, despite the clients of those
8301    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
8302    ///   direct way to find out about `close_weak_asap`. This only applies to
8303    ///   descendents of this `Node` which are using sysmem(1), not to this
8304    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
8305    ///   token, which will fail allocation unless an ancestor of this `Node`
8306    ///   specified `for_child_nodes_also` true.
8307    SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionTokenControlHandle },
8308    /// The server_end will be closed after this `Node` and any child nodes
8309    /// have released their buffer counts, making those counts available for
8310    /// reservation by a different `Node` via
8311    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
8312    ///
8313    /// The `Node` buffer counts may not be released until the entire tree of
8314    /// `Node`(s) is closed or failed, because
8315    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
8316    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
8317    /// `Node` buffer counts remain reserved until the orphaned node is later
8318    /// cleaned up.
8319    ///
8320    /// If the `Node` exceeds a fairly large number of attached eventpair server
8321    /// ends, a log message will indicate this and the `Node` (and the
8322    /// appropriate) sub-tree will fail.
8323    ///
8324    /// The `server_end` will remain open when
8325    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
8326    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
8327    /// [`fuchsia.sysmem2/BufferCollection`].
8328    ///
8329    /// This message can also be used with a
8330    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
8331    AttachNodeTracking {
8332        payload: NodeAttachNodeTrackingRequest,
8333        control_handle: BufferCollectionTokenControlHandle,
8334    },
8335    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
8336    /// one, referring to the same buffer collection.
8337    ///
8338    /// The created tokens are children of this token in the
8339    /// [`fuchsia.sysmem2/Node`] hierarchy.
8340    ///
8341    /// This method can be used to add more participants, by transferring the
8342    /// newly created tokens to additional participants.
8343    ///
8344    /// A new token will be returned for each entry in the
8345    /// `rights_attenuation_masks` array.
8346    ///
8347    /// If the called token may not actually be a valid token due to a
8348    /// potentially hostile/untrusted provider of the token, consider using
8349    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8350    /// instead of potentially getting stuck indefinitely if
8351    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
8352    /// due to the calling token not being a real token.
8353    ///
8354    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
8355    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
8356    /// method, because the sync step is included in this call, at the cost of a
8357    /// round trip during this call.
8358    ///
8359    /// All tokens must be turned in to sysmem via
8360    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8361    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8362    /// successfully allocate buffers (or to logically allocate buffers in the
8363    /// case of subtrees involving
8364    /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
8365    ///
8366    /// All table fields are currently required.
8367    ///
8368    /// + request `rights_attenuation_mask` In each entry of
8369    ///   `rights_attenuation_masks`, rights bits that are zero will be absent
8370    ///   in the buffer VMO rights obtainable via the corresponding returned
8371    ///   token. This allows an initiator or intermediary participant to
8372    ///   attenuate the rights available to a participant. This does not allow a
8373    ///   participant to gain rights that the participant doesn't already have.
8374    ///   The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
8375    ///   attenuation should be applied.
8376    /// - response `tokens` The client ends of each newly created token.
8377    DuplicateSync {
8378        payload: BufferCollectionTokenDuplicateSyncRequest,
8379        responder: BufferCollectionTokenDuplicateSyncResponder,
8380    },
8381    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
8382    /// one, referring to the same buffer collection.
8383    ///
8384    /// The created token is a child of this token in the
8385    /// [`fuchsia.sysmem2/Node`] hierarchy.
8386    ///
8387    /// This method can be used to add a participant, by transferring the newly
8388    /// created token to another participant.
8389    ///
8390    /// This one-way message can be used instead of the two-way
8391    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
8392    /// performance sensitive cases where it would be undesirable to wait for
8393    /// sysmem to respond to
8394    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
8395    /// client code isn't structured to make it easy to duplicate all the needed
8396    /// tokens at once.
8397    ///
8398    /// After sending one or more `Duplicate` messages, and before sending the
8399    /// newly created child tokens to other participants (or to other
8400    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
8401    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
8402    /// `Sync` call can be made on the token, or on the `BufferCollection`
8403    /// obtained by passing this token to `BindSharedCollection`.  Either will
8404    /// ensure that the server knows about the tokens created via `Duplicate`
8405    /// before the other participant sends the token to the server via separate
8406    /// `Allocator` channel.
8407    ///
8408    /// All tokens must be turned in via
8409    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8410    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8411    /// successfully allocate buffers.
8412    ///
8413    /// All table fields are currently required.
8414    ///
8415    /// + request `rights_attenuation_mask` The rights bits that are zero in
8416    ///   this mask will be absent in the buffer VMO rights obtainable via the
8417    ///   client end of `token_request`. This allows an initiator or
8418    ///   intermediary participant to attenuate the rights available to a
8419    ///   delegate participant. This does not allow a participant to gain rights
8420    ///   that the participant doesn't already have. The value
8421    ///   `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
8422    ///   should be applied.
8423    ///   + These values for rights_attenuation_mask result in no attenuation:
8424    ///     + `ZX_RIGHT_SAME_RIGHTS` (preferred)
8425    ///     + 0xFFFFFFFF (this is reasonable when an attenuation mask is
8426    ///       computed)
8427    ///     + 0 (deprecated - do not use 0 - an ERROR will go to the log)
8428    /// + request `token_request` is the server end of a `BufferCollectionToken`
8429    ///   channel. The client end of this channel acts as another participant in
8430    ///   the shared buffer collection.
8431    Duplicate {
8432        payload: BufferCollectionTokenDuplicateRequest,
8433        control_handle: BufferCollectionTokenControlHandle,
8434    },
8435    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
8436    ///
8437    /// When the `BufferCollectionToken` is converted to a
8438    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
8439    /// the `BufferCollection` also.
8440    ///
8441    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
8442    /// client end without having sent
8443    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
8444    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
8445    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
8446    /// to the root `Node`, which fails the whole buffer collection. In
8447    /// contrast, a dispensable `Node` can fail after buffers are allocated
8448    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
8449    /// hierarchy.
8450    ///
8451    /// The dispensable `Node` participates in constraints aggregation along
8452    /// with its parent before buffer allocation. If the dispensable `Node`
8453    /// fails before buffers are allocated, the failure propagates to the
8454    /// dispensable `Node`'s parent.
8455    ///
8456    /// After buffers are allocated, failure of the dispensable `Node` (or any
8457    /// child of the dispensable `Node`) does not propagate to the dispensable
8458    /// `Node`'s parent. Failure does propagate from a normal child of a
8459    /// dispensable `Node` to the dispensable `Node`.  Failure of a child is
8460    /// blocked from reaching its parent if the child is attached using
8461    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
8462    /// dispensable and the failure occurred after allocation.
8463    ///
8464    /// A dispensable `Node` can be used in cases where a participant needs to
8465    /// provide constraints, but after buffers are allocated, the participant
8466    /// can fail without causing buffer collection failure from the parent
8467    /// `Node`'s point of view.
8468    ///
8469    /// In contrast, `BufferCollection.AttachToken` can be used to create a
8470    /// `BufferCollectionToken` which does not participate in constraints
8471    /// aggregation with its parent `Node`, and whose failure at any time does
8472    /// not propagate to its parent `Node`, and whose potential delay providing
8473    /// constraints does not prevent the parent `Node` from completing its
8474    /// buffer allocation.
8475    ///
8476    /// An initiator (creator of the root `Node` using
8477    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
8478    /// scenarios choose to initially use a dispensable `Node` for a first
8479    /// instance of a participant, and then later if the first instance of that
8480    /// participant fails, a new second instance of that participant may be given
8481    /// a `BufferCollectionToken` created with `AttachToken`.
8482    ///
8483    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
8484    /// shortly before sending the dispensable `BufferCollectionToken` to a
8485    /// delegate participant. Because `SetDispensable` prevents propagation of
8486    /// child `Node` failure to parent `Node`(s), if the client was relying on
8487    /// noticing child failure via failure of the parent `Node` retained by the
8488    /// client, the client may instead need to notice failure via other means.
8489    /// If other means aren't available/convenient, the client can instead
8490    /// retain the dispensable `Node` and create a child `Node` under that to
8491    /// send to the delegate participant, retaining this `Node` in order to
8492    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
8493    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
8494    /// (e.g. starting a new instance of the delegate participant and handing it
8495    /// a `BufferCollectionToken` created using
8496    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
8497    /// and clean up in a client-specific way).
8498    ///
8499    /// While it is possible (and potentially useful) to `SetDispensable` on a
8500    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
8501    /// to later replace a failed dispensable `Node` that was a direct child of
8502    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
8503    /// (since there's no `AttachToken` on a group). Instead, to enable
8504    /// `AttachToken` replacement in this case, create an additional
8505    /// non-dispensable token that's a direct child of the group and make the
8506    /// existing dispensable token a child of the additional token.  This way,
8507    /// the additional token that is a direct child of the group has
8508    /// `BufferCollection.AttachToken` which can be used to replace the failed
8509    /// dispensable token.
8510    ///
8511    /// `SetDispensable` on an already-dispensable token is idempotent.
8512    SetDispensable { control_handle: BufferCollectionTokenControlHandle },
8513    /// Create a logical OR among a set of tokens, called a
8514    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
8515    ///
8516    /// Most sysmem clients and many participants don't need to care about this
8517    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
8518    /// a participant wants to attempt to include one set of delegate
8519    /// participants, but if constraints don't combine successfully that way,
8520    /// fall back to a different (possibly overlapping) set of delegate
8521    /// participants, and/or fall back to a less demanding strategy (in terms of
8522    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
8523    /// across all involved delegate participants). In such cases, a
8524    /// `BufferCollectionTokenGroup` is useful.
8525    ///
8526    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
8527    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s).  The child tokens
8528    /// which are not selected during aggregation will fail (close), which a
8529    /// potential participant should notice when their `BufferCollection`
8530    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
8531    /// clean up the speculative usage that didn't end up happening (this is
8532    /// similar to a normal `BufferCollection` server end closing on failure to
8533    /// allocate a logical buffer collection or later async failure of a buffer
8534    /// collection).
8535    ///
8536    /// See comments on protocol `BufferCollectionTokenGroup`.
8537    ///
8538    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
8539    /// applied to the whole group can be achieved with a
8540    /// `BufferCollectionToken` for this purpose as a direct parent of the
8541    /// `BufferCollectionTokenGroup`.
8542    ///
8543    /// All table fields are currently required.
8544    ///
8545    /// + request `group_request` The server end of a
8546    ///   `BufferCollectionTokenGroup` channel to be served by sysmem.
8547    CreateBufferCollectionTokenGroup {
8548        payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8549        control_handle: BufferCollectionTokenControlHandle,
8550    },
8551    /// An interaction was received which does not match any known method.
8552    #[non_exhaustive]
8553    _UnknownMethod {
8554        /// Ordinal of the method that was called.
8555        ordinal: u64,
8556        control_handle: BufferCollectionTokenControlHandle,
8557        method_type: fidl::MethodType,
8558    },
8559}
8560
8561impl BufferCollectionTokenRequest {
8562    #[allow(irrefutable_let_patterns)]
8563    pub fn into_sync(self) -> Option<(BufferCollectionTokenSyncResponder)> {
8564        if let BufferCollectionTokenRequest::Sync { responder } = self {
8565            Some((responder))
8566        } else {
8567            None
8568        }
8569    }
8570
8571    #[allow(irrefutable_let_patterns)]
8572    pub fn into_release(self) -> Option<(BufferCollectionTokenControlHandle)> {
8573        if let BufferCollectionTokenRequest::Release { control_handle } = self {
8574            Some((control_handle))
8575        } else {
8576            None
8577        }
8578    }
8579
8580    #[allow(irrefutable_let_patterns)]
8581    pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionTokenControlHandle)> {
8582        if let BufferCollectionTokenRequest::SetName { payload, control_handle } = self {
8583            Some((payload, control_handle))
8584        } else {
8585            None
8586        }
8587    }
8588
8589    #[allow(irrefutable_let_patterns)]
8590    pub fn into_set_debug_client_info(
8591        self,
8592    ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenControlHandle)> {
8593        if let BufferCollectionTokenRequest::SetDebugClientInfo { payload, control_handle } = self {
8594            Some((payload, control_handle))
8595        } else {
8596            None
8597        }
8598    }
8599
8600    #[allow(irrefutable_let_patterns)]
8601    pub fn into_set_debug_timeout_log_deadline(
8602        self,
8603    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenControlHandle)> {
8604        if let BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {
8605            payload,
8606            control_handle,
8607        } = self
8608        {
8609            Some((payload, control_handle))
8610        } else {
8611            None
8612        }
8613    }
8614
8615    #[allow(irrefutable_let_patterns)]
8616    pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenControlHandle)> {
8617        if let BufferCollectionTokenRequest::SetVerboseLogging { control_handle } = self {
8618            Some((control_handle))
8619        } else {
8620            None
8621        }
8622    }
8623
8624    #[allow(irrefutable_let_patterns)]
8625    pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGetNodeRefResponder)> {
8626        if let BufferCollectionTokenRequest::GetNodeRef { responder } = self {
8627            Some((responder))
8628        } else {
8629            None
8630        }
8631    }
8632
8633    #[allow(irrefutable_let_patterns)]
8634    pub fn into_is_alternate_for(
8635        self,
8636    ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenIsAlternateForResponder)> {
8637        if let BufferCollectionTokenRequest::IsAlternateFor { payload, responder } = self {
8638            Some((payload, responder))
8639        } else {
8640            None
8641        }
8642    }
8643
8644    #[allow(irrefutable_let_patterns)]
8645    pub fn into_get_buffer_collection_id(
8646        self,
8647    ) -> Option<(BufferCollectionTokenGetBufferCollectionIdResponder)> {
8648        if let BufferCollectionTokenRequest::GetBufferCollectionId { responder } = self {
8649            Some((responder))
8650        } else {
8651            None
8652        }
8653    }
8654
8655    #[allow(irrefutable_let_patterns)]
8656    pub fn into_set_weak(self) -> Option<(BufferCollectionTokenControlHandle)> {
8657        if let BufferCollectionTokenRequest::SetWeak { control_handle } = self {
8658            Some((control_handle))
8659        } else {
8660            None
8661        }
8662    }
8663
8664    #[allow(irrefutable_let_patterns)]
8665    pub fn into_set_weak_ok(
8666        self,
8667    ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenControlHandle)> {
8668        if let BufferCollectionTokenRequest::SetWeakOk { payload, control_handle } = self {
8669            Some((payload, control_handle))
8670        } else {
8671            None
8672        }
8673    }
8674
8675    #[allow(irrefutable_let_patterns)]
8676    pub fn into_attach_node_tracking(
8677        self,
8678    ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenControlHandle)> {
8679        if let BufferCollectionTokenRequest::AttachNodeTracking { payload, control_handle } = self {
8680            Some((payload, control_handle))
8681        } else {
8682            None
8683        }
8684    }
8685
8686    #[allow(irrefutable_let_patterns)]
8687    pub fn into_duplicate_sync(
8688        self,
8689    ) -> Option<(
8690        BufferCollectionTokenDuplicateSyncRequest,
8691        BufferCollectionTokenDuplicateSyncResponder,
8692    )> {
8693        if let BufferCollectionTokenRequest::DuplicateSync { payload, responder } = self {
8694            Some((payload, responder))
8695        } else {
8696            None
8697        }
8698    }
8699
8700    #[allow(irrefutable_let_patterns)]
8701    pub fn into_duplicate(
8702        self,
8703    ) -> Option<(BufferCollectionTokenDuplicateRequest, BufferCollectionTokenControlHandle)> {
8704        if let BufferCollectionTokenRequest::Duplicate { payload, control_handle } = self {
8705            Some((payload, control_handle))
8706        } else {
8707            None
8708        }
8709    }
8710
8711    #[allow(irrefutable_let_patterns)]
8712    pub fn into_set_dispensable(self) -> Option<(BufferCollectionTokenControlHandle)> {
8713        if let BufferCollectionTokenRequest::SetDispensable { control_handle } = self {
8714            Some((control_handle))
8715        } else {
8716            None
8717        }
8718    }
8719
8720    #[allow(irrefutable_let_patterns)]
8721    pub fn into_create_buffer_collection_token_group(
8722        self,
8723    ) -> Option<(
8724        BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8725        BufferCollectionTokenControlHandle,
8726    )> {
8727        if let BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {
8728            payload,
8729            control_handle,
8730        } = self
8731        {
8732            Some((payload, control_handle))
8733        } else {
8734            None
8735        }
8736    }
8737
8738    /// Name of the method defined in FIDL
8739    pub fn method_name(&self) -> &'static str {
8740        match *self {
8741            BufferCollectionTokenRequest::Sync { .. } => "sync",
8742            BufferCollectionTokenRequest::Release { .. } => "release",
8743            BufferCollectionTokenRequest::SetName { .. } => "set_name",
8744            BufferCollectionTokenRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
8745            BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline { .. } => {
8746                "set_debug_timeout_log_deadline"
8747            }
8748            BufferCollectionTokenRequest::SetVerboseLogging { .. } => "set_verbose_logging",
8749            BufferCollectionTokenRequest::GetNodeRef { .. } => "get_node_ref",
8750            BufferCollectionTokenRequest::IsAlternateFor { .. } => "is_alternate_for",
8751            BufferCollectionTokenRequest::GetBufferCollectionId { .. } => {
8752                "get_buffer_collection_id"
8753            }
8754            BufferCollectionTokenRequest::SetWeak { .. } => "set_weak",
8755            BufferCollectionTokenRequest::SetWeakOk { .. } => "set_weak_ok",
8756            BufferCollectionTokenRequest::AttachNodeTracking { .. } => "attach_node_tracking",
8757            BufferCollectionTokenRequest::DuplicateSync { .. } => "duplicate_sync",
8758            BufferCollectionTokenRequest::Duplicate { .. } => "duplicate",
8759            BufferCollectionTokenRequest::SetDispensable { .. } => "set_dispensable",
8760            BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup { .. } => {
8761                "create_buffer_collection_token_group"
8762            }
8763            BufferCollectionTokenRequest::_UnknownMethod {
8764                method_type: fidl::MethodType::OneWay,
8765                ..
8766            } => "unknown one-way method",
8767            BufferCollectionTokenRequest::_UnknownMethod {
8768                method_type: fidl::MethodType::TwoWay,
8769                ..
8770            } => "unknown two-way method",
8771        }
8772    }
8773}
8774
/// Server-side control handle for a `BufferCollectionToken` connection.
///
/// Cloning is cheap: all clones share the same underlying `ServeInner` via
/// the `Arc`, so they all refer to the same channel.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenControlHandle {
    // Shared serving state for the channel.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}

impl fidl::endpoints::ControlHandle for BufferCollectionTokenControlHandle {
    // All methods below delegate directly to the shared `ServeInner` / its channel.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Only compiled for Fuchsia targets: sets/clears signals visible on the
    // peer end of the underlying channel.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}

// No inherent methods; all functionality comes from the
// `fidl::endpoints::ControlHandle` trait implementation.
impl BufferCollectionTokenControlHandle {}
8808
/// Responder for the two-way `Sync` method of `BufferCollectionToken`.
///
/// Marked `#[must_use]`: dropping this without sending a response shuts down
/// the channel (see the `Drop` impl below).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenSyncResponder {
    // ManuallyDrop lets `drop_without_shutdown` destroy the handle exactly
    // once while skipping this type's `Drop` (which would shut the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request; echoed when sending the response.
    tx_id: u32,
}

/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenSyncResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8841
8842impl BufferCollectionTokenSyncResponder {
8843    /// Sends a response to the FIDL transaction.
8844    ///
8845    /// Sets the channel to shutdown if an error occurs.
8846    pub fn send(self) -> Result<(), fidl::Error> {
8847        let _result = self.send_raw();
8848        if _result.is_err() {
8849            self.control_handle.shutdown();
8850        }
8851        self.drop_without_shutdown();
8852        _result
8853    }
8854
8855    /// Similar to "send" but does not shutdown the channel if an error occurs.
8856    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
8857        let _result = self.send_raw();
8858        self.drop_without_shutdown();
8859        _result
8860    }
8861
8862    fn send_raw(&self) -> Result<(), fidl::Error> {
8863        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
8864            fidl::encoding::Flexible::new(()),
8865            self.tx_id,
8866            0x11ac2555cf575b54,
8867            fidl::encoding::DynamicFlags::FLEXIBLE,
8868        )
8869    }
8870}
8871
/// Responder for the two-way `GetNodeRef` method of `BufferCollectionToken`.
///
/// Marked `#[must_use]`: dropping this without sending a response shuts down
/// the channel (see the `Drop` impl below).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGetNodeRefResponder {
    // ManuallyDrop lets `drop_without_shutdown` destroy the handle exactly
    // once while skipping this type's `Drop` (which would shut the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request; echoed when sending the response.
    tx_id: u32,
}

/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenGetNodeRefResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8904
8905impl BufferCollectionTokenGetNodeRefResponder {
8906    /// Sends a response to the FIDL transaction.
8907    ///
8908    /// Sets the channel to shutdown if an error occurs.
8909    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8910        let _result = self.send_raw(payload);
8911        if _result.is_err() {
8912            self.control_handle.shutdown();
8913        }
8914        self.drop_without_shutdown();
8915        _result
8916    }
8917
8918    /// Similar to "send" but does not shutdown the channel if an error occurs.
8919    pub fn send_no_shutdown_on_err(
8920        self,
8921        mut payload: NodeGetNodeRefResponse,
8922    ) -> Result<(), fidl::Error> {
8923        let _result = self.send_raw(payload);
8924        self.drop_without_shutdown();
8925        _result
8926    }
8927
8928    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8929        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
8930            fidl::encoding::Flexible::new(&mut payload),
8931            self.tx_id,
8932            0x5b3d0e51614df053,
8933            fidl::encoding::DynamicFlags::FLEXIBLE,
8934        )
8935    }
8936}
8937
/// Responder for the two-way `IsAlternateFor` method of `BufferCollectionToken`.
///
/// Marked `#[must_use]`: dropping this without sending a response shuts down
/// the channel (see the `Drop` impl below).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenIsAlternateForResponder {
    // ManuallyDrop lets `drop_without_shutdown` destroy the handle exactly
    // once while skipping this type's `Drop` (which would shut the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request; echoed when sending the response.
    tx_id: u32,
}

/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenIsAlternateForResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8970
8971impl BufferCollectionTokenIsAlternateForResponder {
8972    /// Sends a response to the FIDL transaction.
8973    ///
8974    /// Sets the channel to shutdown if an error occurs.
8975    pub fn send(
8976        self,
8977        mut result: Result<&NodeIsAlternateForResponse, Error>,
8978    ) -> Result<(), fidl::Error> {
8979        let _result = self.send_raw(result);
8980        if _result.is_err() {
8981            self.control_handle.shutdown();
8982        }
8983        self.drop_without_shutdown();
8984        _result
8985    }
8986
8987    /// Similar to "send" but does not shutdown the channel if an error occurs.
8988    pub fn send_no_shutdown_on_err(
8989        self,
8990        mut result: Result<&NodeIsAlternateForResponse, Error>,
8991    ) -> Result<(), fidl::Error> {
8992        let _result = self.send_raw(result);
8993        self.drop_without_shutdown();
8994        _result
8995    }
8996
8997    fn send_raw(
8998        &self,
8999        mut result: Result<&NodeIsAlternateForResponse, Error>,
9000    ) -> Result<(), fidl::Error> {
9001        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
9002            NodeIsAlternateForResponse,
9003            Error,
9004        >>(
9005            fidl::encoding::FlexibleResult::new(result),
9006            self.tx_id,
9007            0x3a58e00157e0825,
9008            fidl::encoding::DynamicFlags::FLEXIBLE,
9009        )
9010    }
9011}
9012
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGetBufferCollectionIdResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the
    // handle without running the shutdown performed by this type's Drop impl.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
9019
/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
9030
impl fidl::endpoints::Responder for BufferCollectionTokenGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    /// Returns the control handle for the channel this responder replies on.
    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the channel shutdown that
    /// its `Drop` impl would otherwise perform when no response was sent.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
9045
9046impl BufferCollectionTokenGetBufferCollectionIdResponder {
9047    /// Sends a response to the FIDL transaction.
9048    ///
9049    /// Sets the channel to shutdown if an error occurs.
9050    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
9051        let _result = self.send_raw(payload);
9052        if _result.is_err() {
9053            self.control_handle.shutdown();
9054        }
9055        self.drop_without_shutdown();
9056        _result
9057    }
9058
9059    /// Similar to "send" but does not shutdown the channel if an error occurs.
9060    pub fn send_no_shutdown_on_err(
9061        self,
9062        mut payload: &NodeGetBufferCollectionIdResponse,
9063    ) -> Result<(), fidl::Error> {
9064        let _result = self.send_raw(payload);
9065        self.drop_without_shutdown();
9066        _result
9067    }
9068
9069    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
9070        self.control_handle
9071            .inner
9072            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
9073                fidl::encoding::Flexible::new(payload),
9074                self.tx_id,
9075                0x77d19a494b78ba8c,
9076                fidl::encoding::DynamicFlags::FLEXIBLE,
9077            )
9078    }
9079}
9080
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenDuplicateSyncResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the
    // handle without running the shutdown performed by this type's Drop impl.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
9087
/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenDuplicateSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
9098
impl fidl::endpoints::Responder for BufferCollectionTokenDuplicateSyncResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    /// Returns the control handle for the channel this responder replies on.
    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the channel shutdown that
    /// its `Drop` impl would otherwise perform when no response was sent.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
9113
impl BufferCollectionTokenDuplicateSyncResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut payload: BufferCollectionTokenDuplicateSyncResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut payload: BufferCollectionTokenDuplicateSyncResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes `payload` and writes the reply on the channel, using the
    /// `DuplicateSync` method ordinal and flexible dynamic flags.
    ///
    /// NOTE: unlike the by-reference responders in this file, the payload is
    /// taken by value and encoded via `&mut payload` — presumably because the
    /// response carries handles that must be moved out during encoding.
    fn send_raw(
        &self,
        mut payload: BufferCollectionTokenDuplicateSyncResponse,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleType<
            BufferCollectionTokenDuplicateSyncResponse,
        >>(
            fidl::encoding::Flexible::new(&mut payload),
            self.tx_id,
            0x1c1af9919d1ca45c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
9154
/// Zero-sized marker type for the `BufferCollectionTokenGroup` protocol; see
/// the [`fidl::endpoints::ProtocolMarker`] impl below for the associated
/// proxy and request-stream types.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionTokenGroupMarker;
9157
impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenGroupMarker {
    type Proxy = BufferCollectionTokenGroupProxy;
    type RequestStream = BufferCollectionTokenGroupRequestStream;
    // The synchronous proxy only exists on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionTokenGroupSynchronousProxy;

    // Name used for debug/log messages (e.g. as the protocol name passed to
    // the sync client); "(anonymous)" indicates no discoverable service name.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionTokenGroup";
}
9166
/// Client-method surface of the `BufferCollectionTokenGroup` protocol.
///
/// One-way (fire-and-forget) methods return `Result<(), fidl::Error>`
/// directly; two-way methods return an associated future type so
/// implementations can choose their own future. Method names use `r#` raw
/// identifiers so FIDL method names that collide with Rust keywords still
/// work.
pub trait BufferCollectionTokenGroupProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    fn r#create_child(
        &self,
        payload: BufferCollectionTokenGroupCreateChildRequest,
    ) -> Result<(), fidl::Error>;
    type CreateChildrenSyncResponseFut: std::future::Future<
            Output = Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error>,
        > + Send;
    fn r#create_children_sync(
        &self,
        payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
    ) -> Self::CreateChildrenSyncResponseFut;
    fn r#all_children_present(&self) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client for the `BufferCollectionTokenGroup`
/// protocol; only available when targeting Fuchsia.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionTokenGroupSynchronousProxy {
    // Synchronous FIDL client wrapping the underlying channel.
    client: fidl::client::sync::Client,
}
9217
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenGroupSynchronousProxy {
    type Proxy = BufferCollectionTokenGroupProxy;
    type Protocol = BufferCollectionTokenGroupMarker;

    /// Wraps a raw channel in a synchronous proxy.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    /// Recovers the underlying channel, consuming the proxy.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel without transferring ownership.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
9235
9236#[cfg(target_os = "fuchsia")]
9237impl BufferCollectionTokenGroupSynchronousProxy {
9238    pub fn new(channel: fidl::Channel) -> Self {
9239        let protocol_name =
9240            <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
9241        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
9242    }
9243
9244    pub fn into_channel(self) -> fidl::Channel {
9245        self.client.into_channel()
9246    }
9247
9248    /// Waits until an event arrives and returns it. It is safe for other
9249    /// threads to make concurrent requests while waiting for an event.
9250    pub fn wait_for_event(
9251        &self,
9252        deadline: zx::MonotonicInstant,
9253    ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
9254        BufferCollectionTokenGroupEvent::decode(self.client.wait_for_event(deadline)?)
9255    }
9256
9257    /// Ensure that previous messages have been received server side. This is
9258    /// particularly useful after previous messages that created new tokens,
9259    /// because a token must be known to the sysmem server before sending the
9260    /// token to another participant.
9261    ///
9262    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9263    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9264    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9265    /// to mitigate the possibility of a hostile/fake
9266    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9267    /// Another way is to pass the token to
9268    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
9269    /// the token as part of exchanging it for a
9270    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9271    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9272    /// of stalling.
9273    ///
9274    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9275    /// and then starting and completing a `Sync`, it's then safe to send the
9276    /// `BufferCollectionToken` client ends to other participants knowing the
9277    /// server will recognize the tokens when they're sent by the other
9278    /// participants to sysmem in a
9279    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
9280    /// efficient way to create tokens while avoiding unnecessary round trips.
9281    ///
9282    /// Other options include waiting for each
9283    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
9284    /// individually (using separate call to `Sync` after each), or calling
9285    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
9286    /// converted to a `BufferCollection` via
9287    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
9288    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
9289    /// the sync step and can create multiple tokens at once.
9290    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
9291        let _response = self.client.send_query::<
9292            fidl::encoding::EmptyPayload,
9293            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
9294        >(
9295            (),
9296            0x11ac2555cf575b54,
9297            fidl::encoding::DynamicFlags::FLEXIBLE,
9298            ___deadline,
9299        )?
9300        .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
9301        Ok(_response)
9302    }
9303
9304    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
9305    ///
9306    /// Normally a participant will convert a `BufferCollectionToken` into a
9307    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
9308    /// `Release` via the token (and then close the channel immediately or
9309    /// shortly later in response to server closing the server end), which
9310    /// avoids causing buffer collection failure. Without a prior `Release`,
9311    /// closing the `BufferCollectionToken` client end will cause buffer
9312    /// collection failure.
9313    ///
9314    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
9315    ///
9316    /// By default the server handles unexpected closure of a
9317    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
9318    /// first) by failing the buffer collection. Partly this is to expedite
9319    /// closing VMO handles to reclaim memory when any participant fails. If a
9320    /// participant would like to cleanly close a `BufferCollection` without
9321    /// causing buffer collection failure, the participant can send `Release`
9322    /// before closing the `BufferCollection` client end. The `Release` can
9323    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
9324    /// buffer collection won't require constraints from this node in order to
9325    /// allocate. If after `SetConstraints`, the constraints are retained and
9326    /// aggregated, despite the lack of `BufferCollection` connection at the
9327    /// time of constraints aggregation.
9328    ///
9329    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
9330    ///
9331    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
9332    /// end (without `Release` first) will trigger failure of the buffer
9333    /// collection. To close a `BufferCollectionTokenGroup` channel without
9334    /// failing the buffer collection, ensure that AllChildrenPresent() has been
9335    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
9336    /// client end.
9337    ///
9338    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
9340    /// buffer collection will fail (triggered by reception of `Release` without
9341    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
9342    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
9343    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
9344    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
9345    /// close requires `AllChildrenPresent` (if not already sent), then
9346    /// `Release`, then close client end.
9347    ///
9348    /// If `Release` occurs after `AllChildrenPresent`, the children and all
9349    /// their constraints remain intact (just as they would if the
9350    /// `BufferCollectionTokenGroup` channel had remained open), and the client
9351    /// end close doesn't trigger buffer collection failure.
9352    ///
9353    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
9354    ///
9355    /// For brevity, the per-channel-protocol paragraphs above ignore the
9356    /// separate failure domain created by
9357    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
9358    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
9359    /// unexpectedly closes (without `Release` first) and that client end is
9360    /// under a failure domain, instead of failing the whole buffer collection,
9361    /// the failure domain is failed, but the buffer collection itself is
9362    /// isolated from failure of the failure domain. Such failure domains can be
9363    /// nested, in which case only the inner-most failure domain in which the
9364    /// `Node` resides fails.
9365    pub fn r#release(&self) -> Result<(), fidl::Error> {
9366        self.client.send::<fidl::encoding::EmptyPayload>(
9367            (),
9368            0x6a5cae7d6d6e04c6,
9369            fidl::encoding::DynamicFlags::FLEXIBLE,
9370        )
9371    }
9372
9373    /// Set a name for VMOs in this buffer collection.
9374    ///
9375    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
9376    /// will be truncated to fit. The name of the vmo will be suffixed with the
9377    /// buffer index within the collection (if the suffix fits within
9378    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
9379    /// listed in the inspect data.
9380    ///
9381    /// The name only affects VMOs allocated after the name is set; this call
9382    /// does not rename existing VMOs. If multiple clients set different names
9383    /// then the larger priority value will win. Setting a new name with the
9384    /// same priority as a prior name doesn't change the name.
9385    ///
9386    /// All table fields are currently required.
9387    ///
9388    /// + request `priority` The name is only set if this is the first `SetName`
9389    ///   or if `priority` is greater than any previous `priority` value in
9390    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
9391    /// + request `name` The name for VMOs created under this buffer collection.
9392    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
9393        self.client.send::<NodeSetNameRequest>(
9394            payload,
9395            0xb41f1624f48c1e9,
9396            fidl::encoding::DynamicFlags::FLEXIBLE,
9397        )
9398    }
9399
9400    /// Set information about the current client that can be used by sysmem to
9401    /// help diagnose leaking memory and allocation stalls waiting for a
9402    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
9403    ///
9404    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
9406    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
9407    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
9408    ///
9409    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
9410    /// `Allocator` is the most efficient way to ensure that all
9411    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
9412    /// set, and is also more efficient than separately sending the same debug
9413    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
9414    /// created [`fuchsia.sysmem2/Node`].
9415    ///
9416    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
9417    /// indicate which client is closing their channel first, leading to subtree
9418    /// failure (which can be normal if the purpose of the subtree is over, but
9419    /// if happening earlier than expected, the client-channel-specific name can
9420    /// help diagnose where the failure is first coming from, from sysmem's
9421    /// point of view).
9422    ///
9423    /// All table fields are currently required.
9424    ///
9425    /// + request `name` This can be an arbitrary string, but the current
9426    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
9427    /// + request `id` This can be an arbitrary id, but the current process ID
9428    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
9429    pub fn r#set_debug_client_info(
9430        &self,
9431        mut payload: &NodeSetDebugClientInfoRequest,
9432    ) -> Result<(), fidl::Error> {
9433        self.client.send::<NodeSetDebugClientInfoRequest>(
9434            payload,
9435            0x5cde8914608d99b1,
9436            fidl::encoding::DynamicFlags::FLEXIBLE,
9437        )
9438    }
9439
9440    /// Sysmem logs a warning if sysmem hasn't seen
9441    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
9442    /// within 5 seconds after creation of a new collection.
9443    ///
9444    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
9446    /// take effect.
9447    ///
9448    /// In most cases the default works well.
9449    ///
9450    /// All table fields are currently required.
9451    ///
9452    /// + request `deadline` The time at which sysmem will start trying to log
9453    ///   the warning, unless all constraints are with sysmem by then.
9454    pub fn r#set_debug_timeout_log_deadline(
9455        &self,
9456        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
9457    ) -> Result<(), fidl::Error> {
9458        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
9459            payload,
9460            0x716b0af13d5c0806,
9461            fidl::encoding::DynamicFlags::FLEXIBLE,
9462        )
9463    }
9464
9465    /// This enables verbose logging for the buffer collection.
9466    ///
9467    /// Verbose logging includes constraints set via
9468    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
9469    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
9470    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
9471    /// the tree of `Node`(s).
9472    ///
9473    /// Normally sysmem prints only a single line complaint when aggregation
9474    /// fails, with just the specific detailed reason that aggregation failed,
9475    /// with little surrounding context.  While this is often enough to diagnose
9476    /// a problem if only a small change was made and everything was working
9477    /// before the small change, it's often not particularly helpful for getting
9478    /// a new buffer collection to work for the first time.  Especially with
9479    /// more complex trees of nodes, involving things like
9480    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
9481    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
9482    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
9483    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
9484    /// looks like and why it's failing a logical allocation, or why a tree or
9485    /// subtree is failing sooner than expected.
9486    ///
9487    /// The intent of the extra logging is to be acceptable from a performance
9488    /// point of view, under the assumption that verbose logging is only enabled
9489    /// on a low number of buffer collections. If we're not tracking down a bug,
9490    /// we shouldn't send this message.
9491    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
9492        self.client.send::<fidl::encoding::EmptyPayload>(
9493            (),
9494            0x5209c77415b4dfad,
9495            fidl::encoding::DynamicFlags::FLEXIBLE,
9496        )
9497    }
9498
9499    /// This gets a handle that can be used as a parameter to
9500    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
9501    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
9502    /// client obtained this handle from this `Node`.
9503    ///
9504    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
9505    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
9506    /// despite the two calls typically being on different channels.
9507    ///
9508    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
9509    ///
9510    /// All table fields are currently required.
9511    ///
9512    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
9513    ///   different `Node` channel, to prove that the client obtained the handle
9514    ///   from this `Node`.
9515    pub fn r#get_node_ref(
9516        &self,
9517        ___deadline: zx::MonotonicInstant,
9518    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
9519        let _response = self.client.send_query::<
9520            fidl::encoding::EmptyPayload,
9521            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
9522        >(
9523            (),
9524            0x5b3d0e51614df053,
9525            fidl::encoding::DynamicFlags::FLEXIBLE,
9526            ___deadline,
9527        )?
9528        .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
9529        Ok(_response)
9530    }
9531
9532    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
9533    /// rooted at a different child token of a common parent
9534    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
9535    /// passed-in `node_ref`.
9536    ///
9537    /// This call is for assisting with admission control de-duplication, and
9538    /// with debugging.
9539    ///
9540    /// The `node_ref` must be obtained using
9541    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
9542    ///
9543    /// The `node_ref` can be a duplicated handle; it's not necessary to call
9544    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
9545    ///
9546    /// If a calling token may not actually be a valid token at all due to a
9547    /// potentially hostile/untrusted provider of the token, call
9548    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
9549    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
9550    /// never responds due to a calling token not being a real token (not really
9551    /// talking to sysmem).  Another option is to call
9552    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
9553    /// which also validates the token along with converting it to a
9554    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
9555    ///
9556    /// All table fields are currently required.
9557    ///
9558    /// - response `is_alternate`
9559    ///   - true: The first parent node in common between the calling node and
9560    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
9561    ///     that the calling `Node` and the `node_ref` `Node` will not have both
9562    ///     their constraints apply - rather sysmem will choose one or the other
9563    ///     of the constraints - never both.  This is because only one child of
9564    ///     a `BufferCollectionTokenGroup` is selected during logical
9565    ///     allocation, with only that one child's subtree contributing to
9566    ///     constraints aggregation.
9567    ///   - false: The first parent node in common between the calling `Node`
9568    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
9569    ///     Currently, this means the first parent node in common is a
9570    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
9571    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
9572    ///     `Node` may have both their constraints apply during constraints
9573    ///     aggregation of the logical allocation, if both `Node`(s) are
9574    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
9575    ///     this case, there is no `BufferCollectionTokenGroup` that will
9576    ///     directly prevent the two `Node`(s) from both being selected and
9577    ///     their constraints both aggregated, but even when false, one or both
9578    ///     `Node`(s) may still be eliminated from consideration if one or both
9579    ///     `Node`(s) has a direct or indirect parent
9580    ///     `BufferCollectionTokenGroup` which selects a child subtree other
9581    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
9582    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
9583    ///   associated with the same buffer collection as the calling `Node`.
9584    ///   Another reason for this error is if the `node_ref` is an
9585    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
9586    ///   a real `node_ref` obtained from `GetNodeRef`.
9587    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
9589    ///   the needed rights expected on a real `node_ref`.
9590    /// * No other failing status codes are returned by this call.  However,
9591    ///   sysmem may add additional codes in future, so the client should have
9592    ///   sensible default handling for any failing status code.
9593    pub fn r#is_alternate_for(
9594        &self,
9595        mut payload: NodeIsAlternateForRequest,
9596        ___deadline: zx::MonotonicInstant,
9597    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
9598        let _response = self.client.send_query::<
9599            NodeIsAlternateForRequest,
9600            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
9601        >(
9602            &mut payload,
9603            0x3a58e00157e0825,
9604            fidl::encoding::DynamicFlags::FLEXIBLE,
9605            ___deadline,
9606        )?
9607        .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
9608        Ok(_response.map(|x| x))
9609    }
9610
9611    /// Get the buffer collection ID. This ID is also available from
9612    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
9613    /// within the collection).
9614    ///
9615    /// This call is mainly useful in situations where we can't convey a
9616    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
9617    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
9618    /// handle, which can be joined back up with a `BufferCollection` client end
9619    /// that was created via a different path. Prefer to convey a
9620    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
9621    ///
9622    /// Trusting a `buffer_collection_id` value from a source other than sysmem
9623    /// is analogous to trusting a koid value from a source other than zircon.
9624    /// Both should be avoided unless really necessary, and both require
9625    /// caution. In some situations it may be reasonable to refer to a
9626    /// pre-established `BufferCollection` by `buffer_collection_id` via a
9627    /// protocol for efficiency reasons, but an incoming value purporting to be
9628    /// a `buffer_collection_id` is not sufficient alone to justify granting the
9629    /// sender of the `buffer_collection_id` any capability. The sender must
9630    /// first prove to a receiver that the sender has/had a VMO or has/had a
9631    /// `BufferCollectionToken` to the same collection by sending a handle that
9632    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
9633    /// `buffer_collection_id` value. The receiver should take care to avoid
9634    /// assuming that a sender had a `BufferCollectionToken` in cases where the
9635    /// sender has only proven that the sender had a VMO.
9636    ///
9637    /// - response `buffer_collection_id` This ID is unique per buffer
9638    ///   collection per boot. Each buffer is uniquely identified by the
9639    ///   `buffer_collection_id` and `buffer_index` together.
9640    pub fn r#get_buffer_collection_id(
9641        &self,
9642        ___deadline: zx::MonotonicInstant,
9643    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
9644        let _response = self.client.send_query::<
9645            fidl::encoding::EmptyPayload,
9646            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
9647        >(
9648            (),
9649            0x77d19a494b78ba8c,
9650            fidl::encoding::DynamicFlags::FLEXIBLE,
9651            ___deadline,
9652        )?
9653        .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
9654        Ok(_response)
9655    }
9656
9657    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
9658    /// created after this message to weak, which means that a client's `Node`
9659    /// client end (or a child created after this message) is not alone
9660    /// sufficient to keep allocated VMOs alive.
9661    ///
9662    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
9663    /// `close_weak_asap`.
9664    ///
9665    /// This message is only permitted before the `Node` becomes ready for
9666    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
9667    ///   * `BufferCollectionToken`: any time
9668    ///   * `BufferCollection`: before `SetConstraints`
9669    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
9670    ///
9671    /// Currently, no conversion from strong `Node` to weak `Node` after ready
9672    /// for allocation is provided, but a client can simulate that by creating
9673    /// an additional `Node` before allocation and setting that additional
9674    /// `Node` to weak, and then potentially at some point later sending
9675    /// `Release` and closing the client end of the client's strong `Node`, but
9676    /// keeping the client's weak `Node`.
9677    ///
9678    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
9679    /// collection failure (all `Node` client end(s) will see
9680    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
9681    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
9682    /// this situation until all `Node`(s) are ready for allocation. For initial
9683    /// allocation to succeed, at least one strong `Node` is required to exist
9684    /// at allocation time, but after that client receives VMO handles, that
9685    /// client can `BufferCollection.Release` and close the client end without
9686    /// causing this type of failure.
9687    ///
9688    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
9689    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
9690    /// separately as appropriate.
9691    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
9692        self.client.send::<fidl::encoding::EmptyPayload>(
9693            (),
9694            0x22dd3ea514eeffe1,
9695            fidl::encoding::DynamicFlags::FLEXIBLE,
9696        )
9697    }
9698
9699    /// This indicates to sysmem that the client is prepared to pay attention to
9700    /// `close_weak_asap`.
9701    ///
9702    /// If sent, this message must be before
9703    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
9704    ///
9705    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
9706    /// send this message before `WaitForAllBuffersAllocated`, or a parent
9707    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
9708    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
9709    /// trigger buffer collection failure.
9710    ///
9711    /// This message is necessary because weak sysmem VMOs have not always been
9712    /// a thing, so older clients are not aware of the need to pay attention to
9713    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
9714    /// sysmem weak VMO handles asap. By having this message and requiring
9715    /// participants to indicate their acceptance of this aspect of the overall
9716    /// protocol, we avoid situations where an older client is delivered a weak
9717    /// VMO without any way for sysmem to get that VMO to close quickly later
9718    /// (and on a per-buffer basis).
9719    ///
9720    /// A participant that doesn't handle `close_weak_asap` and also doesn't
9721    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
9722    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
9723    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
9724    /// same participant has a child/delegate which does retrieve VMOs, that
9725    /// child/delegate will need to send `SetWeakOk` before
9726    /// `WaitForAllBuffersAllocated`.
9727    ///
9728    /// + request `for_child_nodes_also` If present and true, this means direct
9729    ///   child nodes of this node created after this message plus all
9730    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
9731    ///   those nodes. Any child node of this node that was created before this
9732    ///   message is not included. This setting is "sticky" in the sense that a
9733    ///   subsequent `SetWeakOk` without this bool set to true does not reset
9734    ///   the server-side bool. If this creates a problem for a participant, a
9735    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
9736    ///   tokens instead, as appropriate. A participant should only set
9737    ///   `for_child_nodes_also` true if the participant can really promise to
9738    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
9739    ///   weak VMO handles held by participants holding the corresponding child
9740    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
9741    ///   which are using sysmem(1) can be weak, despite the clients of those
9742    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
9743    ///   direct way to find out about `close_weak_asap`. This only applies to
9744    ///   descendents of this `Node` which are using sysmem(1), not to this
9745    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
9746    ///   token, which will fail allocation unless an ancestor of this `Node`
9747    ///   specified `for_child_nodes_also` true.
9748    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
9749        self.client.send::<NodeSetWeakOkRequest>(
9750            &mut payload,
9751            0x38a44fc4d7724be9,
9752            fidl::encoding::DynamicFlags::FLEXIBLE,
9753        )
9754    }
9755
9756    /// The server_end will be closed after this `Node` and any child nodes have
9757    /// have released their buffer counts, making those counts available for
9758    /// reservation by a different `Node` via
9759    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
9760    ///
9761    /// The `Node` buffer counts may not be released until the entire tree of
9762    /// `Node`(s) is closed or failed, because
9763    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
9764    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
9765    /// `Node` buffer counts remain reserved until the orphaned node is later
9766    /// cleaned up.
9767    ///
9768    /// If the `Node` exceeds a fairly large number of attached eventpair server
9769    /// ends, a log message will indicate this and the `Node` (and the
9770    /// appropriate) sub-tree will fail.
9771    ///
9772    /// The `server_end` will remain open when
9773    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
9774    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
9775    /// [`fuchsia.sysmem2/BufferCollection`].
9776    ///
9777    /// This message can also be used with a
9778    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
9779    pub fn r#attach_node_tracking(
9780        &self,
9781        mut payload: NodeAttachNodeTrackingRequest,
9782    ) -> Result<(), fidl::Error> {
9783        self.client.send::<NodeAttachNodeTrackingRequest>(
9784            &mut payload,
9785            0x3f22f2a293d3cdac,
9786            fidl::encoding::DynamicFlags::FLEXIBLE,
9787        )
9788    }
9789
9790    /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
9791    /// (including its children) will be selected during allocation (or logical
9792    /// allocation).
9793    ///
9794    /// Before passing the client end of this token to
9795    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
9796    /// [`fuchsia.sysmem2/Node.Sync`] after
9797    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
9798    /// Or the client can use
9799    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
9800    /// essentially includes the `Sync`.
9801    ///
9802    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9803    /// fail the group's subtree and close the connection.
9804    ///
9805    /// After all children have been created, send AllChildrenPresent.
9806    ///
9807    /// + request `token_request` The server end of the new token channel.
9808    /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
9809    ///   token allows the holder to get the same rights to buffers as the
9810    ///   parent token (of the group) had. When the value isn't
9811    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpretted as a bitmask with 0
9812    ///   bits ensuring those rights are attentuated, so 0xFFFFFFFF is a synonym
9813    ///   for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
9814    ///   causes subtree failure.
9815    pub fn r#create_child(
9816        &self,
9817        mut payload: BufferCollectionTokenGroupCreateChildRequest,
9818    ) -> Result<(), fidl::Error> {
9819        self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
9820            &mut payload,
9821            0x41a0075d419f30c5,
9822            fidl::encoding::DynamicFlags::FLEXIBLE,
9823        )
9824    }
9825
9826    /// Create 1 or more child tokens at once, synchronously.  In contrast to
9827    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
9828    /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
9829    /// of a returned token to
9830    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
9831    ///
9832    /// The lower-index child tokens are higher priority (attempted sooner) than
9833    /// higher-index child tokens.
9834    ///
9835    /// As per all child tokens, successful aggregation will choose exactly one
9836    /// child among all created children (across all children created across
9837    /// potentially multiple calls to
9838    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
9839    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
9840    ///
9841    /// The maximum permissible total number of children per group, and total
9842    /// number of nodes in an overall tree (from the root) are capped to limits
9843    /// which are not configurable via these protocols.
9844    ///
9845    /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
9846    /// this will fail the group's subtree and close the connection.
9847    ///
9848    /// After all children have been created, send AllChildrenPresent.
9849    ///
9850    /// + request `rights_attentuation_masks` The size of the
9851    ///   `rights_attentuation_masks` determines the number of created child
9852    ///   tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
9853    ///   The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
9854    ///   other value, each 0 bit in the mask attenuates that right.
9855    /// - response `tokens` The created child tokens.
9856    pub fn r#create_children_sync(
9857        &self,
9858        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
9859        ___deadline: zx::MonotonicInstant,
9860    ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
9861        let _response = self.client.send_query::<
9862            BufferCollectionTokenGroupCreateChildrenSyncRequest,
9863            fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
9864        >(
9865            payload,
9866            0x15dea448c536070a,
9867            fidl::encoding::DynamicFlags::FLEXIBLE,
9868            ___deadline,
9869        )?
9870        .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
9871        Ok(_response)
9872    }
9873
9874    /// Indicate that no more children will be created.
9875    ///
9876    /// After creating all children, the client should send
9877    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
9878    /// inform sysmem that no more children will be created, so that sysmem can
9879    /// know when it's ok to start aggregating constraints.
9880    ///
9881    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9882    /// fail the group's subtree and close the connection.
9883    ///
9884    /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
9885    /// after `AllChildrenPresent`, else failure of the group's subtree will be
9886    /// triggered. This is intentionally not analogous to how `Release` without
9887    /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
9888    /// subtree failure.
9889    pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
9890        self.client.send::<fidl::encoding::EmptyPayload>(
9891            (),
9892            0x5c327e4a23391312,
9893            fidl::encoding::DynamicFlags::FLEXIBLE,
9894        )
9895    }
9896}
9897
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionTokenGroupSynchronousProxy> for zx::NullableHandle {
    /// Consumes the synchronous proxy and yields its underlying channel as a
    /// generic handle.
    fn from(value: BufferCollectionTokenGroupSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
9904
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionTokenGroupSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionTokenGroupSynchronousProxy::new(value)
    }
}
9911
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionTokenGroupSynchronousProxy {
    type Protocol = BufferCollectionTokenGroupMarker;

    /// Builds a synchronous proxy from a typed client end.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionTokenGroupMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
9920
/// Asynchronous client proxy for the `fuchsia.sysmem2/BufferCollectionTokenGroup`
/// protocol.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenGroupProxy {
    // Underlying async FIDL client used to send requests and receive
    // responses/events on the protocol channel.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
9925
9926impl fidl::endpoints::Proxy for BufferCollectionTokenGroupProxy {
9927    type Protocol = BufferCollectionTokenGroupMarker;
9928
9929    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
9930        Self::new(inner)
9931    }
9932
9933    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
9934        self.client.into_channel().map_err(|client| Self { client })
9935    }
9936
9937    fn as_channel(&self) -> &::fidl::AsyncChannel {
9938        self.client.as_channel()
9939    }
9940}
9941
9942impl BufferCollectionTokenGroupProxy {
9943    /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionTokenGroup.
9944    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
9945        let protocol_name =
9946            <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
9947        Self { client: fidl::client::Client::new(channel, protocol_name) }
9948    }
9949
9950    /// Get a Stream of events from the remote end of the protocol.
9951    ///
9952    /// # Panics
9953    ///
9954    /// Panics if the event stream was already taken.
9955    pub fn take_event_stream(&self) -> BufferCollectionTokenGroupEventStream {
9956        BufferCollectionTokenGroupEventStream { event_receiver: self.client.take_event_receiver() }
9957    }
9958
9959    /// Ensure that previous messages have been received server side. This is
9960    /// particularly useful after previous messages that created new tokens,
9961    /// because a token must be known to the sysmem server before sending the
9962    /// token to another participant.
9963    ///
9964    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9965    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9966    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9967    /// to mitigate the possibility of a hostile/fake
9968    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9969    /// Another way is to pass the token to
9970    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
9971    /// the token as part of exchanging it for a
9972    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9973    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9974    /// of stalling.
9975    ///
9976    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9977    /// and then starting and completing a `Sync`, it's then safe to send the
9978    /// `BufferCollectionToken` client ends to other participants knowing the
9979    /// server will recognize the tokens when they're sent by the other
9980    /// participants to sysmem in a
9981    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
9982    /// efficient way to create tokens while avoiding unnecessary round trips.
9983    ///
9984    /// Other options include waiting for each
9985    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
9986    /// individually (using separate call to `Sync` after each), or calling
9987    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
9988    /// converted to a `BufferCollection` via
9989    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
9990    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
9991    /// the sync step and can create multiple tokens at once.
9992    pub fn r#sync(
9993        &self,
9994    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
9995        BufferCollectionTokenGroupProxyInterface::r#sync(self)
9996    }
9997
9998    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
9999    ///
10000    /// Normally a participant will convert a `BufferCollectionToken` into a
10001    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
10002    /// `Release` via the token (and then close the channel immediately or
10003    /// shortly later in response to server closing the server end), which
10004    /// avoids causing buffer collection failure. Without a prior `Release`,
10005    /// closing the `BufferCollectionToken` client end will cause buffer
10006    /// collection failure.
10007    ///
10008    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
10009    ///
10010    /// By default the server handles unexpected closure of a
10011    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
10012    /// first) by failing the buffer collection. Partly this is to expedite
10013    /// closing VMO handles to reclaim memory when any participant fails. If a
10014    /// participant would like to cleanly close a `BufferCollection` without
10015    /// causing buffer collection failure, the participant can send `Release`
10016    /// before closing the `BufferCollection` client end. The `Release` can
10017    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
10018    /// buffer collection won't require constraints from this node in order to
10019    /// allocate. If after `SetConstraints`, the constraints are retained and
10020    /// aggregated, despite the lack of `BufferCollection` connection at the
10021    /// time of constraints aggregation.
10022    ///
10023    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
10024    ///
10025    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
10026    /// end (without `Release` first) will trigger failure of the buffer
10027    /// collection. To close a `BufferCollectionTokenGroup` channel without
10028    /// failing the buffer collection, ensure that AllChildrenPresent() has been
10029    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
10030    /// client end.
10031    ///
10032    /// If `Release` occurs before
10033    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
10034    /// buffer collection will fail (triggered by reception of `Release` without
10035    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
10036    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
10037    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
10038    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
10039    /// close requires `AllChildrenPresent` (if not already sent), then
10040    /// `Release`, then close client end.
10041    ///
10042    /// If `Release` occurs after `AllChildrenPresent`, the children and all
10043    /// their constraints remain intact (just as they would if the
10044    /// `BufferCollectionTokenGroup` channel had remained open), and the client
10045    /// end close doesn't trigger buffer collection failure.
10046    ///
10047    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
10048    ///
10049    /// For brevity, the per-channel-protocol paragraphs above ignore the
10050    /// separate failure domain created by
10051    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
10052    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
10053    /// unexpectedly closes (without `Release` first) and that client end is
10054    /// under a failure domain, instead of failing the whole buffer collection,
10055    /// the failure domain is failed, but the buffer collection itself is
10056    /// isolated from failure of the failure domain. Such failure domains can be
10057    /// nested, in which case only the inner-most failure domain in which the
10058    /// `Node` resides fails.
10059    pub fn r#release(&self) -> Result<(), fidl::Error> {
10060        BufferCollectionTokenGroupProxyInterface::r#release(self)
10061    }
10062
10063    /// Set a name for VMOs in this buffer collection.
10064    ///
10065    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
10066    /// will be truncated to fit. The name of the vmo will be suffixed with the
10067    /// buffer index within the collection (if the suffix fits within
10068    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
10069    /// listed in the inspect data.
10070    ///
10071    /// The name only affects VMOs allocated after the name is set; this call
10072    /// does not rename existing VMOs. If multiple clients set different names
10073    /// then the larger priority value will win. Setting a new name with the
10074    /// same priority as a prior name doesn't change the name.
10075    ///
10076    /// All table fields are currently required.
10077    ///
10078    /// + request `priority` The name is only set if this is the first `SetName`
10079    ///   or if `priority` is greater than any previous `priority` value in
10080    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
10081    /// + request `name` The name for VMOs created under this buffer collection.
10082    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
10083        BufferCollectionTokenGroupProxyInterface::r#set_name(self, payload)
10084    }
10085
10086    /// Set information about the current client that can be used by sysmem to
10087    /// help diagnose leaking memory and allocation stalls waiting for a
10088    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
10089    ///
10090    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
10091    /// `Node`(s) derived from this `Node`, unless overriden by
10092    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
10093    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
10094    ///
10095    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
10096    /// `Allocator` is the most efficient way to ensure that all
10097    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
10098    /// set, and is also more efficient than separately sending the same debug
10099    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
10100    /// created [`fuchsia.sysmem2/Node`].
10101    ///
10102    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
10103    /// indicate which client is closing their channel first, leading to subtree
10104    /// failure (which can be normal if the purpose of the subtree is over, but
10105    /// if happening earlier than expected, the client-channel-specific name can
10106    /// help diagnose where the failure is first coming from, from sysmem's
10107    /// point of view).
10108    ///
10109    /// All table fields are currently required.
10110    ///
10111    /// + request `name` This can be an arbitrary string, but the current
10112    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
10113    /// + request `id` This can be an arbitrary id, but the current process ID
10114    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
10115    pub fn r#set_debug_client_info(
10116        &self,
10117        mut payload: &NodeSetDebugClientInfoRequest,
10118    ) -> Result<(), fidl::Error> {
10119        BufferCollectionTokenGroupProxyInterface::r#set_debug_client_info(self, payload)
10120    }
10121
10122    /// Sysmem logs a warning if sysmem hasn't seen
10123    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
10124    /// within 5 seconds after creation of a new collection.
10125    ///
10126    /// Clients can call this method to change when the log is printed. If
10127    /// multiple client set the deadline, it's unspecified which deadline will
10128    /// take effect.
10129    ///
10130    /// In most cases the default works well.
10131    ///
10132    /// All table fields are currently required.
10133    ///
10134    /// + request `deadline` The time at which sysmem will start trying to log
10135    ///   the warning, unless all constraints are with sysmem by then.
10136    pub fn r#set_debug_timeout_log_deadline(
10137        &self,
10138        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
10139    ) -> Result<(), fidl::Error> {
10140        BufferCollectionTokenGroupProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
10141    }
10142
10143    /// This enables verbose logging for the buffer collection.
10144    ///
10145    /// Verbose logging includes constraints set via
10146    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
10147    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
10148    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
10149    /// the tree of `Node`(s).
10150    ///
10151    /// Normally sysmem prints only a single line complaint when aggregation
10152    /// fails, with just the specific detailed reason that aggregation failed,
10153    /// with little surrounding context.  While this is often enough to diagnose
10154    /// a problem if only a small change was made and everything was working
10155    /// before the small change, it's often not particularly helpful for getting
10156    /// a new buffer collection to work for the first time.  Especially with
10157    /// more complex trees of nodes, involving things like
10158    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
10159    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
10160    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
10161    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
10162    /// looks like and why it's failing a logical allocation, or why a tree or
10163    /// subtree is failing sooner than expected.
10164    ///
10165    /// The intent of the extra logging is to be acceptable from a performance
10166    /// point of view, under the assumption that verbose logging is only enabled
10167    /// on a low number of buffer collections. If we're not tracking down a bug,
10168    /// we shouldn't send this message.
10169    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
10170        BufferCollectionTokenGroupProxyInterface::r#set_verbose_logging(self)
10171    }
10172
10173    /// This gets a handle that can be used as a parameter to
10174    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
10175    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
10176    /// client obtained this handle from this `Node`.
10177    ///
10178    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
10179    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
10180    /// despite the two calls typically being on different channels.
10181    ///
10182    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
10183    ///
10184    /// All table fields are currently required.
10185    ///
10186    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
10187    ///   different `Node` channel, to prove that the client obtained the handle
10188    ///   from this `Node`.
10189    pub fn r#get_node_ref(
10190        &self,
10191    ) -> fidl::client::QueryResponseFut<
10192        NodeGetNodeRefResponse,
10193        fidl::encoding::DefaultFuchsiaResourceDialect,
10194    > {
10195        BufferCollectionTokenGroupProxyInterface::r#get_node_ref(self)
10196    }
10197
10198    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
10199    /// rooted at a different child token of a common parent
10200    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
10201    /// passed-in `node_ref`.
10202    ///
10203    /// This call is for assisting with admission control de-duplication, and
10204    /// with debugging.
10205    ///
10206    /// The `node_ref` must be obtained using
10207    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
10208    ///
10209    /// The `node_ref` can be a duplicated handle; it's not necessary to call
10210    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
10211    ///
10212    /// If a calling token may not actually be a valid token at all due to a
10213    /// potentially hostile/untrusted provider of the token, call
10214    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
10215    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
10216    /// never responds due to a calling token not being a real token (not really
10217    /// talking to sysmem).  Another option is to call
10218    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
10219    /// which also validates the token along with converting it to a
10220    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
10221    ///
10222    /// All table fields are currently required.
10223    ///
10224    /// - response `is_alternate`
10225    ///   - true: The first parent node in common between the calling node and
10226    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
10227    ///     that the calling `Node` and the `node_ref` `Node` will not have both
10228    ///     their constraints apply - rather sysmem will choose one or the other
10229    ///     of the constraints - never both.  This is because only one child of
10230    ///     a `BufferCollectionTokenGroup` is selected during logical
10231    ///     allocation, with only that one child's subtree contributing to
10232    ///     constraints aggregation.
10233    ///   - false: The first parent node in common between the calling `Node`
10234    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
10235    ///     Currently, this means the first parent node in common is a
10236    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
10237    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
10238    ///     `Node` may have both their constraints apply during constraints
10239    ///     aggregation of the logical allocation, if both `Node`(s) are
10240    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
10241    ///     this case, there is no `BufferCollectionTokenGroup` that will
10242    ///     directly prevent the two `Node`(s) from both being selected and
10243    ///     their constraints both aggregated, but even when false, one or both
10244    ///     `Node`(s) may still be eliminated from consideration if one or both
10245    ///     `Node`(s) has a direct or indirect parent
10246    ///     `BufferCollectionTokenGroup` which selects a child subtree other
10247    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
10248    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
10249    ///   associated with the same buffer collection as the calling `Node`.
10250    ///   Another reason for this error is if the `node_ref` is an
10251    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
10252    ///   a real `node_ref` obtained from `GetNodeRef`.
10253    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
10255    ///   the needed rights expected on a real `node_ref`.
10256    /// * No other failing status codes are returned by this call.  However,
10257    ///   sysmem may add additional codes in future, so the client should have
10258    ///   sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegates to the trait implementation, which sends the request and
        // decodes the flexible result.
        BufferCollectionTokenGroupProxyInterface::r#is_alternate_for(self, payload)
    }
10268
10269    /// Get the buffer collection ID. This ID is also available from
10270    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
10271    /// within the collection).
10272    ///
10273    /// This call is mainly useful in situations where we can't convey a
10274    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
10275    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
10276    /// handle, which can be joined back up with a `BufferCollection` client end
10277    /// that was created via a different path. Prefer to convey a
10278    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
10279    ///
10280    /// Trusting a `buffer_collection_id` value from a source other than sysmem
10281    /// is analogous to trusting a koid value from a source other than zircon.
10282    /// Both should be avoided unless really necessary, and both require
10283    /// caution. In some situations it may be reasonable to refer to a
10284    /// pre-established `BufferCollection` by `buffer_collection_id` via a
10285    /// protocol for efficiency reasons, but an incoming value purporting to be
10286    /// a `buffer_collection_id` is not sufficient alone to justify granting the
10287    /// sender of the `buffer_collection_id` any capability. The sender must
10288    /// first prove to a receiver that the sender has/had a VMO or has/had a
10289    /// `BufferCollectionToken` to the same collection by sending a handle that
10290    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
10291    /// `buffer_collection_id` value. The receiver should take care to avoid
10292    /// assuming that a sender had a `BufferCollectionToken` in cases where the
10293    /// sender has only proven that the sender had a VMO.
10294    ///
10295    /// - response `buffer_collection_id` This ID is unique per buffer
10296    ///   collection per boot. Each buffer is uniquely identified by the
10297    ///   `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
    ) -> fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Thin wrapper over the trait impl that performs the wire call.
        BufferCollectionTokenGroupProxyInterface::r#get_buffer_collection_id(self)
    }
10306
10307    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
10308    /// created after this message to weak, which means that a client's `Node`
10309    /// client end (or a child created after this message) is not alone
10310    /// sufficient to keep allocated VMOs alive.
10311    ///
10312    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
10313    /// `close_weak_asap`.
10314    ///
10315    /// This message is only permitted before the `Node` becomes ready for
10316    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
10317    ///   * `BufferCollectionToken`: any time
10318    ///   * `BufferCollection`: before `SetConstraints`
10319    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
10320    ///
10321    /// Currently, no conversion from strong `Node` to weak `Node` after ready
10322    /// for allocation is provided, but a client can simulate that by creating
10323    /// an additional `Node` before allocation and setting that additional
10324    /// `Node` to weak, and then potentially at some point later sending
10325    /// `Release` and closing the client end of the client's strong `Node`, but
10326    /// keeping the client's weak `Node`.
10327    ///
10328    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
10329    /// collection failure (all `Node` client end(s) will see
10330    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
10331    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
10332    /// this situation until all `Node`(s) are ready for allocation. For initial
10333    /// allocation to succeed, at least one strong `Node` is required to exist
10334    /// at allocation time, but after that client receives VMO handles, that
10335    /// client can `BufferCollection.Release` and close the client end without
10336    /// causing this type of failure.
10337    ///
10338    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
10339    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
10340    /// separately as appropriate.
    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
        // One-way message; delegated to the trait impl for the actual send.
        BufferCollectionTokenGroupProxyInterface::r#set_weak(self)
    }
10344
10345    /// This indicates to sysmem that the client is prepared to pay attention to
10346    /// `close_weak_asap`.
10347    ///
10348    /// If sent, this message must be before
10349    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
10350    ///
10351    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
10352    /// send this message before `WaitForAllBuffersAllocated`, or a parent
10353    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
10354    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
10355    /// trigger buffer collection failure.
10356    ///
10357    /// This message is necessary because weak sysmem VMOs have not always been
10358    /// a thing, so older clients are not aware of the need to pay attention to
10359    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
10360    /// sysmem weak VMO handles asap. By having this message and requiring
10361    /// participants to indicate their acceptance of this aspect of the overall
10362    /// protocol, we avoid situations where an older client is delivered a weak
10363    /// VMO without any way for sysmem to get that VMO to close quickly later
10364    /// (and on a per-buffer basis).
10365    ///
10366    /// A participant that doesn't handle `close_weak_asap` and also doesn't
10367    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
10368    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
10369    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
10370    /// same participant has a child/delegate which does retrieve VMOs, that
10371    /// child/delegate will need to send `SetWeakOk` before
10372    /// `WaitForAllBuffersAllocated`.
10373    ///
10374    /// + request `for_child_nodes_also` If present and true, this means direct
10375    ///   child nodes of this node created after this message plus all
10376    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
10377    ///   those nodes. Any child node of this node that was created before this
10378    ///   message is not included. This setting is "sticky" in the sense that a
10379    ///   subsequent `SetWeakOk` without this bool set to true does not reset
10380    ///   the server-side bool. If this creates a problem for a participant, a
10381    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
10382    ///   tokens instead, as appropriate. A participant should only set
10383    ///   `for_child_nodes_also` true if the participant can really promise to
10384    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
10385    ///   weak VMO handles held by participants holding the corresponding child
10386    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
10387    ///   which are using sysmem(1) can be weak, despite the clients of those
10388    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
10389    ///   direct way to find out about `close_weak_asap`. This only applies to
10390    ///   descendents of this `Node` which are using sysmem(1), not to this
10391    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
10392    ///   token, which will fail allocation unless an ancestor of this `Node`
10393    ///   specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // One-way message; the payload is consumed by value because the
        // request table is a resource type.
        BufferCollectionTokenGroupProxyInterface::r#set_weak_ok(self, payload)
    }
10397
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
10400    /// reservation by a different `Node` via
10401    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
10402    ///
10403    /// The `Node` buffer counts may not be released until the entire tree of
10404    /// `Node`(s) is closed or failed, because
10405    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
10406    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
10407    /// `Node` buffer counts remain reserved until the orphaned node is later
10408    /// cleaned up.
10409    ///
10410    /// If the `Node` exceeds a fairly large number of attached eventpair server
10411    /// ends, a log message will indicate this and the `Node` (and the
10412    /// appropriate) sub-tree will fail.
10413    ///
10414    /// The `server_end` will remain open when
10415    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
10416    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
10417    /// [`fuchsia.sysmem2/BufferCollection`].
10418    ///
10419    /// This message can also be used with a
10420    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; payload carries the eventpair server end, so it is
        // moved rather than borrowed.
        BufferCollectionTokenGroupProxyInterface::r#attach_node_tracking(self, payload)
    }
10427
10428    /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
10429    /// (including its children) will be selected during allocation (or logical
10430    /// allocation).
10431    ///
10432    /// Before passing the client end of this token to
10433    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
10434    /// [`fuchsia.sysmem2/Node.Sync`] after
10435    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
10436    /// Or the client can use
10437    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
10438    /// essentially includes the `Sync`.
10439    ///
10440    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
10441    /// fail the group's subtree and close the connection.
10442    ///
10443    /// After all children have been created, send AllChildrenPresent.
10444    ///
10445    /// + request `token_request` The server end of the new token channel.
10446    /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
10447    ///   token allows the holder to get the same rights to buffers as the
10448    ///   parent token (of the group) had. When the value isn't
    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
    ///   bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
10451    ///   for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
10452    ///   causes subtree failure.
    pub fn r#create_child(
        &self,
        mut payload: BufferCollectionTokenGroupCreateChildRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; payload carries the new token's server end and is
        // therefore moved into the send.
        BufferCollectionTokenGroupProxyInterface::r#create_child(self, payload)
    }
10459
10460    /// Create 1 or more child tokens at once, synchronously.  In contrast to
10461    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
10462    /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
10463    /// of a returned token to
10464    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
10465    ///
10466    /// The lower-index child tokens are higher priority (attempted sooner) than
10467    /// higher-index child tokens.
10468    ///
10469    /// As per all child tokens, successful aggregation will choose exactly one
10470    /// child among all created children (across all children created across
10471    /// potentially multiple calls to
10472    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
10473    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
10474    ///
10475    /// The maximum permissible total number of children per group, and total
10476    /// number of nodes in an overall tree (from the root) are capped to limits
10477    /// which are not configurable via these protocols.
10478    ///
10479    /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
10480    /// this will fail the group's subtree and close the connection.
10481    ///
10482    /// After all children have been created, send AllChildrenPresent.
10483    ///
10484    /// + request `rights_attentuation_masks` The size of the
10485    ///   `rights_attentuation_masks` determines the number of created child
10486    ///   tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
10487    ///   The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
10488    ///   other value, each 0 bit in the mask attenuates that right.
10489    /// - response `tokens` The created child tokens.
    pub fn r#create_children_sync(
        &self,
        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
    ) -> fidl::client::QueryResponseFut<
        BufferCollectionTokenGroupCreateChildrenSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call; the request is borrowed (value type), the response
        // future resolves with the created child tokens.
        BufferCollectionTokenGroupProxyInterface::r#create_children_sync(self, payload)
    }
10499
10500    /// Indicate that no more children will be created.
10501    ///
10502    /// After creating all children, the client should send
10503    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
10504    /// inform sysmem that no more children will be created, so that sysmem can
10505    /// know when it's ok to start aggregating constraints.
10506    ///
10507    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
10508    /// fail the group's subtree and close the connection.
10509    ///
10510    /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
10511    /// after `AllChildrenPresent`, else failure of the group's subtree will be
10512    /// triggered. This is intentionally not analogous to how `Release` without
10513    /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
10514    /// subtree failure.
    pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
        // One-way message; delegated to the trait impl for the actual send.
        BufferCollectionTokenGroupProxyInterface::r#all_children_present(self)
    }
10518}
10519
// Client-side wire implementation for `BufferCollectionTokenGroup`.
//
// One-way methods encode and send a single FLEXIBLE message; two-way methods
// send a query and decode the reply via a local `_decode` helper. The 64-bit
// hex literals are the FIDL method ordinals and must match the protocol
// definition exactly (each appears twice: once in the decoder, once in the
// send call).
impl BufferCollectionTokenGroupProxyInterface for BufferCollectionTokenGroupProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes Sync's empty response body.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        // This method can fail with a domain `Error`, hence the
        // `FlexibleResultType` decoder rather than `FlexibleType`.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
            // Identity map; generated uniformly even when no conversion is needed.
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // Resource-type payload: passed as `&mut` so handles can be moved out
        // during encoding.
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#create_child(
        &self,
        mut payload: BufferCollectionTokenGroupCreateChildRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
            &mut payload,
            0x41a0075d419f30c5,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type CreateChildrenSyncResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionTokenGroupCreateChildrenSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#create_children_sync(
        &self,
        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
    ) -> Self::CreateChildrenSyncResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x15dea448c536070a,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            BufferCollectionTokenGroupCreateChildrenSyncRequest,
            BufferCollectionTokenGroupCreateChildrenSyncResponse,
        >(
            payload,
            0x15dea448c536070a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#all_children_present(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5c327e4a23391312,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
10743
/// A stream of decoded events arriving on a `BufferCollectionTokenGroup`
/// channel; see the `futures::Stream` impl below.
pub struct BufferCollectionTokenGroupEventStream {
    // Yields raw message buffers from the channel; each buffer is decoded
    // into a `BufferCollectionTokenGroupEvent` in `poll_next`.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
10747
10748impl std::marker::Unpin for BufferCollectionTokenGroupEventStream {}
10749
impl futures::stream::FusedStream for BufferCollectionTokenGroupEventStream {
    fn is_terminated(&self) -> bool {
        // Delegates to the underlying event receiver's terminated state.
        self.event_receiver.is_terminated()
    }
}
10755
10756impl futures::Stream for BufferCollectionTokenGroupEventStream {
10757    type Item = Result<BufferCollectionTokenGroupEvent, fidl::Error>;
10758
10759    fn poll_next(
10760        mut self: std::pin::Pin<&mut Self>,
10761        cx: &mut std::task::Context<'_>,
10762    ) -> std::task::Poll<Option<Self::Item>> {
10763        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
10764            &mut self.event_receiver,
10765            cx
10766        )?) {
10767            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenGroupEvent::decode(buf))),
10768            None => std::task::Poll::Ready(None),
10769        }
10770    }
10771}
10772
/// Events delivered on a `BufferCollectionTokenGroup` channel. This protocol
/// currently defines no events, so the only variant captures flexible events
/// with ordinals unknown to these bindings.
#[derive(Debug)]
pub enum BufferCollectionTokenGroupEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
10781
10782impl BufferCollectionTokenGroupEvent {
10783    /// Decodes a message buffer as a [`BufferCollectionTokenGroupEvent`].
10784    fn decode(
10785        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
10786    ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
10787        let (bytes, _handles) = buf.split_mut();
10788        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
10789        debug_assert_eq!(tx_header.tx_id, 0);
10790        match tx_header.ordinal {
10791            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
10792                Ok(BufferCollectionTokenGroupEvent::_UnknownEvent {
10793                    ordinal: tx_header.ordinal,
10794                })
10795            }
10796            _ => Err(fidl::Error::UnknownOrdinal {
10797                ordinal: tx_header.ordinal,
10798                protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
10799            })
10800        }
10801    }
10802}
10803
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionTokenGroup.
pub struct BufferCollectionTokenGroupRequestStream {
    // Server-side channel state, shared with any control handles handed out.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has yielded `None`; polling after that panics.
    is_terminated: bool,
}
10809
10810impl std::marker::Unpin for BufferCollectionTokenGroupRequestStream {}
10811
impl futures::stream::FusedStream for BufferCollectionTokenGroupRequestStream {
    fn is_terminated(&self) -> bool {
        // Tracked locally; set when the channel shuts down or the peer closes.
        self.is_terminated
    }
}
10817
10818impl fidl::endpoints::RequestStream for BufferCollectionTokenGroupRequestStream {
10819    type Protocol = BufferCollectionTokenGroupMarker;
10820    type ControlHandle = BufferCollectionTokenGroupControlHandle;
10821
10822    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
10823        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
10824    }
10825
10826    fn control_handle(&self) -> Self::ControlHandle {
10827        BufferCollectionTokenGroupControlHandle { inner: self.inner.clone() }
10828    }
10829
10830    fn into_inner(
10831        self,
10832    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
10833    {
10834        (self.inner, self.is_terminated)
10835    }
10836
10837    fn from_inner(
10838        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
10839        is_terminated: bool,
10840    ) -> Self {
10841        Self { inner, is_terminated }
10842    }
10843}
10844
impl futures::Stream for BufferCollectionTokenGroupRequestStream {
    type Item = Result<BufferCollectionTokenGroupRequest, fidl::Error>;

    /// Reads the next message from the channel, decodes its transaction
    /// header, and dispatches on the method ordinal to produce the matching
    /// [`BufferCollectionTokenGroupRequest`] variant.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Server shutdown ends the stream cleanly.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        // Polling again after the stream yielded `None` is a caller bug.
        if this.is_terminated {
            panic!("polled BufferCollectionTokenGroupRequestStream after completion");
        }
        // Decode using thread-local scratch buffers for bytes and handles.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closed: terminate the stream without an error.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    // Any other read failure is surfaced to the consumer.
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                // Sync (two-way): empty payload; responder carries the tx_id.
                0x11ac2555cf575b54 => {
                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::Sync {
                        responder: BufferCollectionTokenGroupSyncResponder {
                            control_handle: std::mem::ManuallyDrop::new(control_handle),
                            tx_id: header.tx_id,
                        },
                    })
                }
                // Release (one-way): empty payload.
                0x6a5cae7d6d6e04c6 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::Release {
                        control_handle,
                    })
                }
                // SetName (one-way): decodes a NodeSetNameRequest payload.
                0xb41f1624f48c1e9 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::SetName {payload: req,
                        control_handle,
                    })
                }
                // SetDebugClientInfo (one-way): NodeSetDebugClientInfoRequest payload.
                0x5cde8914608d99b1 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::SetDebugClientInfo {payload: req,
                        control_handle,
                    })
                }
                // SetDebugTimeoutLogDeadline (one-way).
                0x716b0af13d5c0806 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {payload: req,
                        control_handle,
                    })
                }
                // SetVerboseLogging (one-way): empty payload.
                0x5209c77415b4dfad => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::SetVerboseLogging {
                        control_handle,
                    })
                }
                // GetNodeRef (two-way): empty payload; reply via responder.
                0x5b3d0e51614df053 => {
                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::GetNodeRef {
                        responder: BufferCollectionTokenGroupGetNodeRefResponder {
                            control_handle: std::mem::ManuallyDrop::new(control_handle),
                            tx_id: header.tx_id,
                        },
                    })
                }
                // IsAlternateFor (two-way): NodeIsAlternateForRequest payload.
                0x3a58e00157e0825 => {
                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                    let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::IsAlternateFor {payload: req,
                        responder: BufferCollectionTokenGroupIsAlternateForResponder {
                            control_handle: std::mem::ManuallyDrop::new(control_handle),
                            tx_id: header.tx_id,
                        },
                    })
                }
                // GetBufferCollectionId (two-way): empty payload.
                0x77d19a494b78ba8c => {
                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::GetBufferCollectionId {
                        responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder {
                            control_handle: std::mem::ManuallyDrop::new(control_handle),
                            tx_id: header.tx_id,
                        },
                    })
                }
                // SetWeak (one-way): empty payload.
                0x22dd3ea514eeffe1 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::SetWeak {
                        control_handle,
                    })
                }
                // SetWeakOk (one-way): NodeSetWeakOkRequest payload.
                0x38a44fc4d7724be9 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::SetWeakOk {payload: req,
                        control_handle,
                    })
                }
                // AttachNodeTracking (one-way): NodeAttachNodeTrackingRequest payload.
                0x3f22f2a293d3cdac => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::AttachNodeTracking {payload: req,
                        control_handle,
                    })
                }
                // CreateChild (one-way): group-specific request payload.
                0x41a0075d419f30c5 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::CreateChild {payload: req,
                        control_handle,
                    })
                }
                // CreateChildrenSync (two-way): payload plus reply responder.
                0x15dea448c536070a => {
                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                    let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildrenSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildrenSyncRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::CreateChildrenSync {payload: req,
                        responder: BufferCollectionTokenGroupCreateChildrenSyncResponder {
                            control_handle: std::mem::ManuallyDrop::new(control_handle),
                            tx_id: header.tx_id,
                        },
                    })
                }
                // AllChildrenPresent (one-way): empty payload.
                0x5c327e4a23391312 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::AllChildrenPresent {
                        control_handle,
                    })
                }
                // Unknown flexible one-way method (tx_id == 0): surface as
                // `_UnknownMethod` without replying.
                _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                    Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
                        ordinal: header.ordinal,
                        control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
                        method_type: fidl::MethodType::OneWay,
                    })
                }
                // Unknown flexible two-way method: auto-reply with a framework
                // error, then surface as `_UnknownMethod`.
                _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                    this.inner.send_framework_err(
                        fidl::encoding::FrameworkErr::UnknownMethod,
                        header.tx_id,
                        header.ordinal,
                        header.dynamic_flags(),
                        (bytes, handles),
                    )?;
                    Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
                        ordinal: header.ordinal,
                        control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
                        method_type: fidl::MethodType::TwoWay,
                    })
                }
                // Unknown strict method: protocol error.
                _ => Err(fidl::Error::UnknownOrdinal {
                    ordinal: header.ordinal,
                    protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                }),
            }))
            },
        )
    }
}
11090
11091/// The sysmem implementation is consistent with a logical / conceptual model of
11092/// allocation / logical allocation as follows:
11093///
11094/// As usual, a logical allocation considers either the root and all nodes with
11095/// connectivity to the root that don't transit a [`fuchsia.sysmem2/Node`]
11096/// created with [`fuchsia.sysmem2/BufferCollection.AttachToken`], or a subtree
11097/// rooted at an `AttachToken` `Node` and all `Node`(s) with connectivity to
11098/// that subtree that don't transit another `AttachToken`.  This is called the
11099/// logical allocation pruned subtree, or pruned subtree for short.
11100///
11101/// During constraints aggregation, each
11102/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] will select a single child
11103/// `Node` among its direct children. The rest of the children will appear to
11104/// fail the logical allocation, while the selected child may succeed.
11105///
11106/// When more than one `BufferCollectionTokenGroup` exists in the overall
11107/// logical allocation pruned subtree, the relative priority between two groups
11108/// is equivalent to their ordering in a DFS pre-order iteration of the tree,
11109/// with parents higher priority than children, and left children higher
11110/// priority than right children.
11111///
11112/// When a particular child of a group is selected (whether provisionally during
11113/// a constraints aggregation attempt, or as a final selection), the
11114/// non-selection of other children of the group will "hide" any other groups
11115/// under those non-selected children.
11116///
11117/// Within a logical allocation, aggregation is attempted first by provisionally
11118/// selecting child 0 of the highest-priority group, and child 0 of the next
11119/// highest-priority group that isn't hidden by the provisional selections so
11120/// far, etc.
11121///
11122/// If that aggregation attempt fails, aggregation will be attempted with the
11123/// ordinal 0 child of all the same groups except the lowest priority non-hidden
11124/// group which will provisionally select its ordinal 1 child (and then child 2
11125/// and so on). If a new lowest-priority group is un-hidden as provisional
11126/// selections are updated, that newly un-hidden lowest-priority group has all
11127/// its children considered in order, before changing the provisional selection
11128/// in the former lowest-priority group. In terms of result, this is equivalent
11129/// to systematic enumeration of all possible combinations of choices in a
11130/// counting-like order updating the lowest-priority group the most often and
11131/// the highest-priority group the least often. Rather than actually attempting
11132/// aggregation with all the combinations, we can skip over combinations which
11133/// are redundant/equivalent due to hiding without any change to the result.
11134///
11135/// Attempted constraint aggregations of enumerated non-equivalent combinations
11136/// of choices continue in this manner until either (a) all aggregation attempts
11137/// fail in which case the overall logical allocation fails, or (b) until an
11138/// attempted aggregation succeeds, in which case buffer allocation (if needed;
11139/// if this is the pruned subtree rooted at the overall root `Node`) is
11140/// attempted once. If buffer allocation based on the first successful
11141/// constraints aggregation fails, the overall logical allocation fails (there
11142/// is no buffer allocation retry / re-attempt). If buffer allocation succeeds
11143/// (or is not needed due to being a pruned subtree that doesn't include the
11144/// root), the logical allocation succeeds.
11145///
11146/// If this prioritization scheme cannot reasonably work for your usage of
11147/// sysmem, please don't hesitate to contact sysmem folks to discuss potentially
11148/// adding a way to achieve what you need.
11149///
11150/// Please avoid creating a large number of `BufferCollectionTokenGroup`(s) per
11151/// logical allocation, especially with large number of children overall, and
11152/// especially in cases where aggregation may reasonably be expected to often
11153/// fail using ordinal 0 children and possibly with later children as well.
11154/// Sysmem mitigates potentially high time complexity of evaluating too many
11155/// child combinations/selections across too many groups by simply failing
11156/// logical allocation beyond a certain (fairly high, but not huge) max number
11157/// of considered group child combinations/selections. More advanced (and more
11158/// complicated) mitigation is not anticipated to be practically necessary or
11159/// worth the added complexity. Please contact sysmem folks if the max limit is
11160/// getting hit or if you anticipate it getting hit, to discuss potential
11161/// options.
11162///
11163/// Prefer to use multiple [`fuchsia.sysmem2/ImageFormatConstraints`] in a
11164/// single [`fuchsia.sysmem2/BufferCollectionConstraints`] when feasible (when a
11165/// participant just needs to express the ability to work with more than a
11166/// single [`fuchsia.images2/PixelFormat`], with sysmem choosing which
11167/// `PixelFormat` to use among those supported by all participants).
11168///
11169/// Similar to [`fuchsia.sysmem2/BufferCollectionToken`] and
11170/// [`fuchsia.sysmem2/BufferCollection`], closure of the
11171/// `BufferCollectionTokenGroup` channel without sending
11172/// [`fuchsia.sysmem2/Node.Release`] first will cause buffer collection failure
11173/// (or subtree failure if using
11174/// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11175/// [`fuchsia.sysmem2/BufferCollection.AttachToken`] and the
11176/// `BufferCollectionTokenGroup` is part of a subtree under such a node that
11177/// doesn't propagate failure to its parent).
11178///
11179/// Epitaphs are not used in this protocol.
11180#[derive(Debug)]
11181pub enum BufferCollectionTokenGroupRequest {
11182    /// Ensure that previous messages have been received server side. This is
11183    /// particularly useful after previous messages that created new tokens,
11184    /// because a token must be known to the sysmem server before sending the
11185    /// token to another participant.
11186    ///
11187    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
11188    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
11189    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
11190    /// to mitigate the possibility of a hostile/fake
11191    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
11192    /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
11194    /// the token as part of exchanging it for a
11195    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
11196    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
11197    /// of stalling.
11198    ///
11199    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
11200    /// and then starting and completing a `Sync`, it's then safe to send the
11201    /// `BufferCollectionToken` client ends to other participants knowing the
11202    /// server will recognize the tokens when they're sent by the other
11203    /// participants to sysmem in a
11204    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
11205    /// efficient way to create tokens while avoiding unnecessary round trips.
11206    ///
11207    /// Other options include waiting for each
11208    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
11209    /// individually (using separate call to `Sync` after each), or calling
11210    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
11211    /// converted to a `BufferCollection` via
11212    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
11213    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
11214    /// the sync step and can create multiple tokens at once.
11215    Sync { responder: BufferCollectionTokenGroupSyncResponder },
11216    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
11217    ///
11218    /// Normally a participant will convert a `BufferCollectionToken` into a
11219    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
11220    /// `Release` via the token (and then close the channel immediately or
11221    /// shortly later in response to server closing the server end), which
11222    /// avoids causing buffer collection failure. Without a prior `Release`,
11223    /// closing the `BufferCollectionToken` client end will cause buffer
11224    /// collection failure.
11225    ///
11226    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
11227    ///
11228    /// By default the server handles unexpected closure of a
11229    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
11230    /// first) by failing the buffer collection. Partly this is to expedite
11231    /// closing VMO handles to reclaim memory when any participant fails. If a
11232    /// participant would like to cleanly close a `BufferCollection` without
11233    /// causing buffer collection failure, the participant can send `Release`
11234    /// before closing the `BufferCollection` client end. The `Release` can
11235    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
11236    /// buffer collection won't require constraints from this node in order to
11237    /// allocate. If after `SetConstraints`, the constraints are retained and
11238    /// aggregated, despite the lack of `BufferCollection` connection at the
11239    /// time of constraints aggregation.
11240    ///
11241    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
11242    ///
11243    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
11244    /// end (without `Release` first) will trigger failure of the buffer
11245    /// collection. To close a `BufferCollectionTokenGroup` channel without
11246    /// failing the buffer collection, ensure that AllChildrenPresent() has been
11247    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
11248    /// client end.
11249    ///
11250    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
11252    /// buffer collection will fail (triggered by reception of `Release` without
11253    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
11254    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
11255    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
11256    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
11257    /// close requires `AllChildrenPresent` (if not already sent), then
11258    /// `Release`, then close client end.
11259    ///
11260    /// If `Release` occurs after `AllChildrenPresent`, the children and all
11261    /// their constraints remain intact (just as they would if the
11262    /// `BufferCollectionTokenGroup` channel had remained open), and the client
11263    /// end close doesn't trigger buffer collection failure.
11264    ///
11265    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
11266    ///
11267    /// For brevity, the per-channel-protocol paragraphs above ignore the
11268    /// separate failure domain created by
11269    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11270    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
11271    /// unexpectedly closes (without `Release` first) and that client end is
11272    /// under a failure domain, instead of failing the whole buffer collection,
11273    /// the failure domain is failed, but the buffer collection itself is
11274    /// isolated from failure of the failure domain. Such failure domains can be
11275    /// nested, in which case only the inner-most failure domain in which the
11276    /// `Node` resides fails.
11277    Release { control_handle: BufferCollectionTokenGroupControlHandle },
11278    /// Set a name for VMOs in this buffer collection.
11279    ///
11280    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
11281    /// will be truncated to fit. The name of the vmo will be suffixed with the
11282    /// buffer index within the collection (if the suffix fits within
11283    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
11284    /// listed in the inspect data.
11285    ///
11286    /// The name only affects VMOs allocated after the name is set; this call
11287    /// does not rename existing VMOs. If multiple clients set different names
11288    /// then the larger priority value will win. Setting a new name with the
11289    /// same priority as a prior name doesn't change the name.
11290    ///
11291    /// All table fields are currently required.
11292    ///
11293    /// + request `priority` The name is only set if this is the first `SetName`
11294    ///   or if `priority` is greater than any previous `priority` value in
11295    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
11296    /// + request `name` The name for VMOs created under this buffer collection.
11297    SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenGroupControlHandle },
11298    /// Set information about the current client that can be used by sysmem to
11299    /// help diagnose leaking memory and allocation stalls waiting for a
11300    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
11301    ///
11302    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
11304    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
11305    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
11306    ///
11307    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
11308    /// `Allocator` is the most efficient way to ensure that all
11309    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
11310    /// set, and is also more efficient than separately sending the same debug
11311    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
11312    /// created [`fuchsia.sysmem2/Node`].
11313    ///
11314    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
11315    /// indicate which client is closing their channel first, leading to subtree
11316    /// failure (which can be normal if the purpose of the subtree is over, but
11317    /// if happening earlier than expected, the client-channel-specific name can
11318    /// help diagnose where the failure is first coming from, from sysmem's
11319    /// point of view).
11320    ///
11321    /// All table fields are currently required.
11322    ///
11323    /// + request `name` This can be an arbitrary string, but the current
11324    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
11325    /// + request `id` This can be an arbitrary id, but the current process ID
11326    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
11327    SetDebugClientInfo {
11328        payload: NodeSetDebugClientInfoRequest,
11329        control_handle: BufferCollectionTokenGroupControlHandle,
11330    },
11331    /// Sysmem logs a warning if sysmem hasn't seen
11332    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
11333    /// within 5 seconds after creation of a new collection.
11334    ///
11335    /// Clients can call this method to change when the log is printed. If
11336    /// multiple client set the deadline, it's unspecified which deadline will
11337    /// take effect.
11338    ///
11339    /// In most cases the default works well.
11340    ///
11341    /// All table fields are currently required.
11342    ///
11343    /// + request `deadline` The time at which sysmem will start trying to log
11344    ///   the warning, unless all constraints are with sysmem by then.
11345    SetDebugTimeoutLogDeadline {
11346        payload: NodeSetDebugTimeoutLogDeadlineRequest,
11347        control_handle: BufferCollectionTokenGroupControlHandle,
11348    },
11349    /// This enables verbose logging for the buffer collection.
11350    ///
11351    /// Verbose logging includes constraints set via
11352    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
11353    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
11354    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
11355    /// the tree of `Node`(s).
11356    ///
11357    /// Normally sysmem prints only a single line complaint when aggregation
11358    /// fails, with just the specific detailed reason that aggregation failed,
11359    /// with little surrounding context.  While this is often enough to diagnose
11360    /// a problem if only a small change was made and everything was working
11361    /// before the small change, it's often not particularly helpful for getting
11362    /// a new buffer collection to work for the first time.  Especially with
11363    /// more complex trees of nodes, involving things like
11364    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
11365    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
11366    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
11367    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
11368    /// looks like and why it's failing a logical allocation, or why a tree or
11369    /// subtree is failing sooner than expected.
11370    ///
11371    /// The intent of the extra logging is to be acceptable from a performance
11372    /// point of view, under the assumption that verbose logging is only enabled
11373    /// on a low number of buffer collections. If we're not tracking down a bug,
11374    /// we shouldn't send this message.
11375    SetVerboseLogging { control_handle: BufferCollectionTokenGroupControlHandle },
11376    /// This gets a handle that can be used as a parameter to
11377    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
11378    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
11379    /// client obtained this handle from this `Node`.
11380    ///
11381    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
11382    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
11383    /// despite the two calls typically being on different channels.
11384    ///
11385    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
11386    ///
11387    /// All table fields are currently required.
11388    ///
11389    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
11390    ///   different `Node` channel, to prove that the client obtained the handle
11391    ///   from this `Node`.
11392    GetNodeRef { responder: BufferCollectionTokenGroupGetNodeRefResponder },
11393    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
11394    /// rooted at a different child token of a common parent
11395    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
11396    /// passed-in `node_ref`.
11397    ///
11398    /// This call is for assisting with admission control de-duplication, and
11399    /// with debugging.
11400    ///
11401    /// The `node_ref` must be obtained using
11402    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
11403    ///
11404    /// The `node_ref` can be a duplicated handle; it's not necessary to call
11405    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
11406    ///
11407    /// If a calling token may not actually be a valid token at all due to a
11408    /// potentially hostile/untrusted provider of the token, call
11409    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
11410    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
11411    /// never responds due to a calling token not being a real token (not really
11412    /// talking to sysmem).  Another option is to call
11413    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
11414    /// which also validates the token along with converting it to a
11415    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
11416    ///
11417    /// All table fields are currently required.
11418    ///
11419    /// - response `is_alternate`
11420    ///   - true: The first parent node in common between the calling node and
11421    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
11422    ///     that the calling `Node` and the `node_ref` `Node` will not have both
11423    ///     their constraints apply - rather sysmem will choose one or the other
11424    ///     of the constraints - never both.  This is because only one child of
11425    ///     a `BufferCollectionTokenGroup` is selected during logical
11426    ///     allocation, with only that one child's subtree contributing to
11427    ///     constraints aggregation.
11428    ///   - false: The first parent node in common between the calling `Node`
11429    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
11430    ///     Currently, this means the first parent node in common is a
11431    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
11432    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
11433    ///     `Node` may have both their constraints apply during constraints
11434    ///     aggregation of the logical allocation, if both `Node`(s) are
11435    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
11436    ///     this case, there is no `BufferCollectionTokenGroup` that will
11437    ///     directly prevent the two `Node`(s) from both being selected and
11438    ///     their constraints both aggregated, but even when false, one or both
11439    ///     `Node`(s) may still be eliminated from consideration if one or both
11440    ///     `Node`(s) has a direct or indirect parent
11441    ///     `BufferCollectionTokenGroup` which selects a child subtree other
11442    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
11443    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
11444    ///   associated with the same buffer collection as the calling `Node`.
11445    ///   Another reason for this error is if the `node_ref` is an
11446    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
11447    ///   a real `node_ref` obtained from `GetNodeRef`.
11448    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
11449    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle , or doesn't have
11450    ///   the needed rights expected on a real `node_ref`.
11451    /// * No other failing status codes are returned by this call.  However,
11452    ///   sysmem may add additional codes in future, so the client should have
11453    ///   sensible default handling for any failing status code.
11454    IsAlternateFor {
11455        payload: NodeIsAlternateForRequest,
11456        responder: BufferCollectionTokenGroupIsAlternateForResponder,
11457    },
11458    /// Get the buffer collection ID. This ID is also available from
11459    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
11460    /// within the collection).
11461    ///
11462    /// This call is mainly useful in situations where we can't convey a
11463    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
11464    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
11465    /// handle, which can be joined back up with a `BufferCollection` client end
11466    /// that was created via a different path. Prefer to convey a
11467    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
11468    ///
11469    /// Trusting a `buffer_collection_id` value from a source other than sysmem
11470    /// is analogous to trusting a koid value from a source other than zircon.
11471    /// Both should be avoided unless really necessary, and both require
11472    /// caution. In some situations it may be reasonable to refer to a
11473    /// pre-established `BufferCollection` by `buffer_collection_id` via a
11474    /// protocol for efficiency reasons, but an incoming value purporting to be
11475    /// a `buffer_collection_id` is not sufficient alone to justify granting the
11476    /// sender of the `buffer_collection_id` any capability. The sender must
11477    /// first prove to a receiver that the sender has/had a VMO or has/had a
11478    /// `BufferCollectionToken` to the same collection by sending a handle that
11479    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
11480    /// `buffer_collection_id` value. The receiver should take care to avoid
11481    /// assuming that a sender had a `BufferCollectionToken` in cases where the
11482    /// sender has only proven that the sender had a VMO.
11483    ///
11484    /// - response `buffer_collection_id` This ID is unique per buffer
11485    ///   collection per boot. Each buffer is uniquely identified by the
11486    ///   `buffer_collection_id` and `buffer_index` together.
11487    GetBufferCollectionId { responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder },
11488    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
11489    /// created after this message to weak, which means that a client's `Node`
11490    /// client end (or a child created after this message) is not alone
11491    /// sufficient to keep allocated VMOs alive.
11492    ///
11493    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
11494    /// `close_weak_asap`.
11495    ///
11496    /// This message is only permitted before the `Node` becomes ready for
11497    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
11498    ///   * `BufferCollectionToken`: any time
11499    ///   * `BufferCollection`: before `SetConstraints`
11500    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
11501    ///
11502    /// Currently, no conversion from strong `Node` to weak `Node` after ready
11503    /// for allocation is provided, but a client can simulate that by creating
11504    /// an additional `Node` before allocation and setting that additional
11505    /// `Node` to weak, and then potentially at some point later sending
11506    /// `Release` and closing the client end of the client's strong `Node`, but
11507    /// keeping the client's weak `Node`.
11508    ///
11509    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
11510    /// collection failure (all `Node` client end(s) will see
11511    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
11512    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
11513    /// this situation until all `Node`(s) are ready for allocation. For initial
11514    /// allocation to succeed, at least one strong `Node` is required to exist
11515    /// at allocation time, but after that client receives VMO handles, that
11516    /// client can `BufferCollection.Release` and close the client end without
11517    /// causing this type of failure.
11518    ///
11519    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
11520    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
11521    /// separately as appropriate.
11522    SetWeak { control_handle: BufferCollectionTokenGroupControlHandle },
11523    /// This indicates to sysmem that the client is prepared to pay attention to
11524    /// `close_weak_asap`.
11525    ///
11526    /// If sent, this message must be before
11527    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
11528    ///
11529    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
11530    /// send this message before `WaitForAllBuffersAllocated`, or a parent
11531    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
11532    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
11533    /// trigger buffer collection failure.
11534    ///
11535    /// This message is necessary because weak sysmem VMOs have not always been
11536    /// a thing, so older clients are not aware of the need to pay attention to
11537    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
11538    /// sysmem weak VMO handles asap. By having this message and requiring
11539    /// participants to indicate their acceptance of this aspect of the overall
11540    /// protocol, we avoid situations where an older client is delivered a weak
11541    /// VMO without any way for sysmem to get that VMO to close quickly later
11542    /// (and on a per-buffer basis).
11543    ///
11544    /// A participant that doesn't handle `close_weak_asap` and also doesn't
11545    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
11546    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
11547    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
11548    /// same participant has a child/delegate which does retrieve VMOs, that
11549    /// child/delegate will need to send `SetWeakOk` before
11550    /// `WaitForAllBuffersAllocated`.
11551    ///
11552    /// + request `for_child_nodes_also` If present and true, this means direct
11553    ///   child nodes of this node created after this message plus all
11554    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
11555    ///   those nodes. Any child node of this node that was created before this
11556    ///   message is not included. This setting is "sticky" in the sense that a
11557    ///   subsequent `SetWeakOk` without this bool set to true does not reset
11558    ///   the server-side bool. If this creates a problem for a participant, a
11559    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
11560    ///   tokens instead, as appropriate. A participant should only set
11561    ///   `for_child_nodes_also` true if the participant can really promise to
11562    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
11563    ///   weak VMO handles held by participants holding the corresponding child
11564    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
11565    ///   which are using sysmem(1) can be weak, despite the clients of those
11566    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
11567    ///   direct way to find out about `close_weak_asap`. This only applies to
11568    ///   descendents of this `Node` which are using sysmem(1), not to this
11569    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
11570    ///   token, which will fail allocation unless an ancestor of this `Node`
11571    ///   specified `for_child_nodes_also` true.
11572    SetWeakOk {
11573        payload: NodeSetWeakOkRequest,
11574        control_handle: BufferCollectionTokenGroupControlHandle,
11575    },
11576    /// The server_end will be closed after this `Node` and any child nodes have
11577    /// have released their buffer counts, making those counts available for
11578    /// reservation by a different `Node` via
11579    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
11580    ///
11581    /// The `Node` buffer counts may not be released until the entire tree of
11582    /// `Node`(s) is closed or failed, because
11583    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
11584    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
11585    /// `Node` buffer counts remain reserved until the orphaned node is later
11586    /// cleaned up.
11587    ///
11588    /// If the `Node` exceeds a fairly large number of attached eventpair server
11589    /// ends, a log message will indicate this and the `Node` (and the
11590    /// appropriate) sub-tree will fail.
11591    ///
11592    /// The `server_end` will remain open when
11593    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
11594    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
11595    /// [`fuchsia.sysmem2/BufferCollection`].
11596    ///
11597    /// This message can also be used with a
11598    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
11599    AttachNodeTracking {
11600        payload: NodeAttachNodeTrackingRequest,
11601        control_handle: BufferCollectionTokenGroupControlHandle,
11602    },
11603    /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
11604    /// (including its children) will be selected during allocation (or logical
11605    /// allocation).
11606    ///
11607    /// Before passing the client end of this token to
11608    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
11609    /// [`fuchsia.sysmem2/Node.Sync`] after
11610    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
11611    /// Or the client can use
11612    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
11613    /// essentially includes the `Sync`.
11614    ///
11615    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11616    /// fail the group's subtree and close the connection.
11617    ///
11618    /// After all children have been created, send AllChildrenPresent.
11619    ///
11620    /// + request `token_request` The server end of the new token channel.
11621    /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
11622    ///   token allows the holder to get the same rights to buffers as the
11623    ///   parent token (of the group) had. When the value isn't
11624    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpretted as a bitmask with 0
11625    ///   bits ensuring those rights are attentuated, so 0xFFFFFFFF is a synonym
11626    ///   for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
11627    ///   causes subtree failure.
11628    CreateChild {
11629        payload: BufferCollectionTokenGroupCreateChildRequest,
11630        control_handle: BufferCollectionTokenGroupControlHandle,
11631    },
11632    /// Create 1 or more child tokens at once, synchronously.  In contrast to
11633    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
11634    /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
11635    /// of a returned token to
11636    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
11637    ///
11638    /// The lower-index child tokens are higher priority (attempted sooner) than
11639    /// higher-index child tokens.
11640    ///
11641    /// As per all child tokens, successful aggregation will choose exactly one
11642    /// child among all created children (across all children created across
11643    /// potentially multiple calls to
11644    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
11645    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
11646    ///
11647    /// The maximum permissible total number of children per group, and total
11648    /// number of nodes in an overall tree (from the root) are capped to limits
11649    /// which are not configurable via these protocols.
11650    ///
11651    /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
11652    /// this will fail the group's subtree and close the connection.
11653    ///
11654    /// After all children have been created, send AllChildrenPresent.
11655    ///
11656    /// + request `rights_attentuation_masks` The size of the
11657    ///   `rights_attentuation_masks` determines the number of created child
11658    ///   tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
11659    ///   The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
11660    ///   other value, each 0 bit in the mask attenuates that right.
11661    /// - response `tokens` The created child tokens.
11662    CreateChildrenSync {
11663        payload: BufferCollectionTokenGroupCreateChildrenSyncRequest,
11664        responder: BufferCollectionTokenGroupCreateChildrenSyncResponder,
11665    },
11666    /// Indicate that no more children will be created.
11667    ///
11668    /// After creating all children, the client should send
11669    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
11670    /// inform sysmem that no more children will be created, so that sysmem can
11671    /// know when it's ok to start aggregating constraints.
11672    ///
11673    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11674    /// fail the group's subtree and close the connection.
11675    ///
11676    /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
11677    /// after `AllChildrenPresent`, else failure of the group's subtree will be
11678    /// triggered. This is intentionally not analogous to how `Release` without
11679    /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
11680    /// subtree failure.
11681    AllChildrenPresent { control_handle: BufferCollectionTokenGroupControlHandle },
11682    /// An interaction was received which does not match any known method.
11683    #[non_exhaustive]
11684    _UnknownMethod {
11685        /// Ordinal of the method that was called.
11686        ordinal: u64,
11687        control_handle: BufferCollectionTokenGroupControlHandle,
11688        method_type: fidl::MethodType,
11689    },
11690}
11691
11692impl BufferCollectionTokenGroupRequest {
11693    #[allow(irrefutable_let_patterns)]
11694    pub fn into_sync(self) -> Option<(BufferCollectionTokenGroupSyncResponder)> {
11695        if let BufferCollectionTokenGroupRequest::Sync { responder } = self {
11696            Some((responder))
11697        } else {
11698            None
11699        }
11700    }
11701
11702    #[allow(irrefutable_let_patterns)]
11703    pub fn into_release(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11704        if let BufferCollectionTokenGroupRequest::Release { control_handle } = self {
11705            Some((control_handle))
11706        } else {
11707            None
11708        }
11709    }
11710
11711    #[allow(irrefutable_let_patterns)]
11712    pub fn into_set_name(
11713        self,
11714    ) -> Option<(NodeSetNameRequest, BufferCollectionTokenGroupControlHandle)> {
11715        if let BufferCollectionTokenGroupRequest::SetName { payload, control_handle } = self {
11716            Some((payload, control_handle))
11717        } else {
11718            None
11719        }
11720    }
11721
11722    #[allow(irrefutable_let_patterns)]
11723    pub fn into_set_debug_client_info(
11724        self,
11725    ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenGroupControlHandle)> {
11726        if let BufferCollectionTokenGroupRequest::SetDebugClientInfo { payload, control_handle } =
11727            self
11728        {
11729            Some((payload, control_handle))
11730        } else {
11731            None
11732        }
11733    }
11734
11735    #[allow(irrefutable_let_patterns)]
11736    pub fn into_set_debug_timeout_log_deadline(
11737        self,
11738    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenGroupControlHandle)>
11739    {
11740        if let BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {
11741            payload,
11742            control_handle,
11743        } = self
11744        {
11745            Some((payload, control_handle))
11746        } else {
11747            None
11748        }
11749    }
11750
11751    #[allow(irrefutable_let_patterns)]
11752    pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11753        if let BufferCollectionTokenGroupRequest::SetVerboseLogging { control_handle } = self {
11754            Some((control_handle))
11755        } else {
11756            None
11757        }
11758    }
11759
11760    #[allow(irrefutable_let_patterns)]
11761    pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGroupGetNodeRefResponder)> {
11762        if let BufferCollectionTokenGroupRequest::GetNodeRef { responder } = self {
11763            Some((responder))
11764        } else {
11765            None
11766        }
11767    }
11768
11769    #[allow(irrefutable_let_patterns)]
11770    pub fn into_is_alternate_for(
11771        self,
11772    ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenGroupIsAlternateForResponder)>
11773    {
11774        if let BufferCollectionTokenGroupRequest::IsAlternateFor { payload, responder } = self {
11775            Some((payload, responder))
11776        } else {
11777            None
11778        }
11779    }
11780
11781    #[allow(irrefutable_let_patterns)]
11782    pub fn into_get_buffer_collection_id(
11783        self,
11784    ) -> Option<(BufferCollectionTokenGroupGetBufferCollectionIdResponder)> {
11785        if let BufferCollectionTokenGroupRequest::GetBufferCollectionId { responder } = self {
11786            Some((responder))
11787        } else {
11788            None
11789        }
11790    }
11791
11792    #[allow(irrefutable_let_patterns)]
11793    pub fn into_set_weak(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11794        if let BufferCollectionTokenGroupRequest::SetWeak { control_handle } = self {
11795            Some((control_handle))
11796        } else {
11797            None
11798        }
11799    }
11800
11801    #[allow(irrefutable_let_patterns)]
11802    pub fn into_set_weak_ok(
11803        self,
11804    ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenGroupControlHandle)> {
11805        if let BufferCollectionTokenGroupRequest::SetWeakOk { payload, control_handle } = self {
11806            Some((payload, control_handle))
11807        } else {
11808            None
11809        }
11810    }
11811
11812    #[allow(irrefutable_let_patterns)]
11813    pub fn into_attach_node_tracking(
11814        self,
11815    ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenGroupControlHandle)> {
11816        if let BufferCollectionTokenGroupRequest::AttachNodeTracking { payload, control_handle } =
11817            self
11818        {
11819            Some((payload, control_handle))
11820        } else {
11821            None
11822        }
11823    }
11824
11825    #[allow(irrefutable_let_patterns)]
11826    pub fn into_create_child(
11827        self,
11828    ) -> Option<(
11829        BufferCollectionTokenGroupCreateChildRequest,
11830        BufferCollectionTokenGroupControlHandle,
11831    )> {
11832        if let BufferCollectionTokenGroupRequest::CreateChild { payload, control_handle } = self {
11833            Some((payload, control_handle))
11834        } else {
11835            None
11836        }
11837    }
11838
11839    #[allow(irrefutable_let_patterns)]
11840    pub fn into_create_children_sync(
11841        self,
11842    ) -> Option<(
11843        BufferCollectionTokenGroupCreateChildrenSyncRequest,
11844        BufferCollectionTokenGroupCreateChildrenSyncResponder,
11845    )> {
11846        if let BufferCollectionTokenGroupRequest::CreateChildrenSync { payload, responder } = self {
11847            Some((payload, responder))
11848        } else {
11849            None
11850        }
11851    }
11852
11853    #[allow(irrefutable_let_patterns)]
11854    pub fn into_all_children_present(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11855        if let BufferCollectionTokenGroupRequest::AllChildrenPresent { control_handle } = self {
11856            Some((control_handle))
11857        } else {
11858            None
11859        }
11860    }
11861
11862    /// Name of the method defined in FIDL
11863    pub fn method_name(&self) -> &'static str {
11864        match *self {
11865            BufferCollectionTokenGroupRequest::Sync { .. } => "sync",
11866            BufferCollectionTokenGroupRequest::Release { .. } => "release",
11867            BufferCollectionTokenGroupRequest::SetName { .. } => "set_name",
11868            BufferCollectionTokenGroupRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
11869            BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline { .. } => {
11870                "set_debug_timeout_log_deadline"
11871            }
11872            BufferCollectionTokenGroupRequest::SetVerboseLogging { .. } => "set_verbose_logging",
11873            BufferCollectionTokenGroupRequest::GetNodeRef { .. } => "get_node_ref",
11874            BufferCollectionTokenGroupRequest::IsAlternateFor { .. } => "is_alternate_for",
11875            BufferCollectionTokenGroupRequest::GetBufferCollectionId { .. } => {
11876                "get_buffer_collection_id"
11877            }
11878            BufferCollectionTokenGroupRequest::SetWeak { .. } => "set_weak",
11879            BufferCollectionTokenGroupRequest::SetWeakOk { .. } => "set_weak_ok",
11880            BufferCollectionTokenGroupRequest::AttachNodeTracking { .. } => "attach_node_tracking",
11881            BufferCollectionTokenGroupRequest::CreateChild { .. } => "create_child",
11882            BufferCollectionTokenGroupRequest::CreateChildrenSync { .. } => "create_children_sync",
11883            BufferCollectionTokenGroupRequest::AllChildrenPresent { .. } => "all_children_present",
11884            BufferCollectionTokenGroupRequest::_UnknownMethod {
11885                method_type: fidl::MethodType::OneWay,
11886                ..
11887            } => "unknown one-way method",
11888            BufferCollectionTokenGroupRequest::_UnknownMethod {
11889                method_type: fidl::MethodType::TwoWay,
11890                ..
11891            } => "unknown two-way method",
11892        }
11893    }
11894}
11895
/// Server-side control handle for a `BufferCollectionTokenGroup` connection.
///
/// Cloning is cheap: the handle is a thin wrapper around a shared,
/// reference-counted serve state (`Arc<fidl::ServeInner<...>>`), so all
/// clones refer to the same underlying channel.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenGroupControlHandle {
    // Shared serve state for this connection; `Arc` makes `Clone` a refcount bump.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
11900
/// `ControlHandle` implementation: every operation is a direct delegation to
/// the shared `ServeInner` state (or to the channel it wraps).
impl fidl::endpoints::ControlHandle for BufferCollectionTokenGroupControlHandle {
    // Shut down the connection being served.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    // Shut down the connection, sending the given status as an epitaph first.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    // Whether the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    // Signals that fire when the underlying channel closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Raise/clear zircon signals on the peer end of the channel.
    // Only available on Fuchsia targets, where real zircon channels exist.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        // `Peered` is imported locally so the trait method resolves on the channel.
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
11927
11928impl BufferCollectionTokenGroupControlHandle {}
11929
/// Responder for the `BufferCollectionTokenGroup.Sync` transaction.
///
/// `control_handle` is wrapped in `ManuallyDrop` so the `Drop` impl and
/// `drop_without_shutdown` can control exactly when the handle is released;
/// `tx_id` is the request's transaction id, echoed back when responding.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupSyncResponder {
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    tx_id: u32,
}
11936
/// Sets the channel to be shut down (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
11947
// Standard responder plumbing: expose the control handle and provide a way to
// consume the responder without triggering the shutdown-on-drop behavior.
impl fidl::endpoints::Responder for BufferCollectionTokenGroupSyncResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
11962
11963impl BufferCollectionTokenGroupSyncResponder {
11964    /// Sends a response to the FIDL transaction.
11965    ///
11966    /// Sets the channel to shutdown if an error occurs.
11967    pub fn send(self) -> Result<(), fidl::Error> {
11968        let _result = self.send_raw();
11969        if _result.is_err() {
11970            self.control_handle.shutdown();
11971        }
11972        self.drop_without_shutdown();
11973        _result
11974    }
11975
11976    /// Similar to "send" but does not shutdown the channel if an error occurs.
11977    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
11978        let _result = self.send_raw();
11979        self.drop_without_shutdown();
11980        _result
11981    }
11982
11983    fn send_raw(&self) -> Result<(), fidl::Error> {
11984        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
11985            fidl::encoding::Flexible::new(()),
11986            self.tx_id,
11987            0x11ac2555cf575b54,
11988            fidl::encoding::DynamicFlags::FLEXIBLE,
11989        )
11990    }
11991}
11992
/// Responder for the `BufferCollectionTokenGroup.GetNodeRef` transaction.
///
/// `control_handle` is `ManuallyDrop` so drop timing can be controlled by the
/// `Drop` impl and `drop_without_shutdown`; `tx_id` is the request's
/// transaction id, echoed back when responding.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupGetNodeRefResponder {
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    tx_id: u32,
}
11999
/// Sets the channel to be shut down (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12010
// Standard responder plumbing: expose the control handle and provide a way to
// consume the responder without triggering the shutdown-on-drop behavior.
impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetNodeRefResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12025
12026impl BufferCollectionTokenGroupGetNodeRefResponder {
12027    /// Sends a response to the FIDL transaction.
12028    ///
12029    /// Sets the channel to shutdown if an error occurs.
12030    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
12031        let _result = self.send_raw(payload);
12032        if _result.is_err() {
12033            self.control_handle.shutdown();
12034        }
12035        self.drop_without_shutdown();
12036        _result
12037    }
12038
12039    /// Similar to "send" but does not shutdown the channel if an error occurs.
12040    pub fn send_no_shutdown_on_err(
12041        self,
12042        mut payload: NodeGetNodeRefResponse,
12043    ) -> Result<(), fidl::Error> {
12044        let _result = self.send_raw(payload);
12045        self.drop_without_shutdown();
12046        _result
12047    }
12048
12049    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
12050        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
12051            fidl::encoding::Flexible::new(&mut payload),
12052            self.tx_id,
12053            0x5b3d0e51614df053,
12054            fidl::encoding::DynamicFlags::FLEXIBLE,
12055        )
12056    }
12057}
12058
/// Responder for the `BufferCollectionTokenGroup.IsAlternateFor` transaction.
///
/// `control_handle` is `ManuallyDrop` so drop timing can be controlled by the
/// `Drop` impl and `drop_without_shutdown`; `tx_id` is the request's
/// transaction id, echoed back when responding.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupIsAlternateForResponder {
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    tx_id: u32,
}
12065
/// Sets the channel to be shut down (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12076
// Standard responder plumbing: expose the control handle and provide a way to
// consume the responder without triggering the shutdown-on-drop behavior.
impl fidl::endpoints::Responder for BufferCollectionTokenGroupIsAlternateForResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12091
impl BufferCollectionTokenGroupIsAlternateForResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes either the success payload or the domain `Error` as a flexible
    // result frame, tagged with the ordinal fidlgen assigned to
    // `IsAlternateFor` and the request's transaction id.
    fn send_raw(
        &self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            NodeIsAlternateForResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
12133
/// Responder for the `BufferCollectionTokenGroup.GetBufferCollectionId`
/// transaction.
///
/// `control_handle` is `ManuallyDrop` so drop timing can be controlled by the
/// `Drop` impl and `drop_without_shutdown`; `tx_id` is the request's
/// transaction id, echoed back when responding.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    tx_id: u32,
}
12140
/// Sets the channel to be shut down (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12151
// Standard responder plumbing: expose the control handle and provide a way to
// consume the responder without triggering the shutdown-on-drop behavior.
impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12166
impl BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut payload: &NodeGetBufferCollectionIdResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    // Encodes the borrowed payload as a flexible response frame, tagged with
    // the ordinal fidlgen assigned to `GetBufferCollectionId` and the
    // request's transaction id.
    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
        self.control_handle
            .inner
            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
                fidl::encoding::Flexible::new(payload),
                self.tx_id,
                0x77d19a494b78ba8c,
                fidl::encoding::DynamicFlags::FLEXIBLE,
            )
    }
}
12201
/// Responder for the `BufferCollectionTokenGroup.CreateChildrenSync`
/// transaction.
///
/// `control_handle` is `ManuallyDrop` so drop timing can be controlled by the
/// `Drop` impl and `drop_without_shutdown`; `tx_id` is the request's
/// transaction id, echoed back when responding.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupCreateChildrenSyncResponder {
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    tx_id: u32,
}
12208
/// Sets the channel to be shut down (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupCreateChildrenSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12219
// Standard responder plumbing: expose the control handle and provide a way to
// consume the responder without triggering the shutdown-on-drop behavior.
impl fidl::endpoints::Responder for BufferCollectionTokenGroupCreateChildrenSyncResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12234
impl BufferCollectionTokenGroupCreateChildrenSyncResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    // Encodes the owned payload as a flexible response frame, tagged with the
    // ordinal fidlgen assigned to `CreateChildrenSync` and the request's
    // transaction id.
    fn send_raw(
        &self,
        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleType<
            BufferCollectionTokenGroupCreateChildrenSyncResponse,
        >>(
            fidl::encoding::Flexible::new(&mut payload),
            self.tx_id,
            0x15dea448c536070a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
12275
/// Zero-sized protocol marker for `fuchsia.sysmem2/Node`; exists only to
/// parameterize the proxy, request-stream, and endpoint types.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct NodeMarker;
12278
impl fidl::endpoints::ProtocolMarker for NodeMarker {
    type Proxy = NodeProxy;
    type RequestStream = NodeRequestStream;
    // The synchronous proxy blocks on the channel, so it only exists on
    // Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = NodeSynchronousProxy;

    // NOTE(review): the "(anonymous)" prefix presumably means this protocol
    // has no discoverable service name — confirm against the FIDL source.
    const DEBUG_NAME: &'static str = "(anonymous) Node";
}
12287pub type NodeIsAlternateForResult = Result<NodeIsAlternateForResponse, Error>;
12288
/// Generated client-side interface for the `fuchsia.sysmem2/Node` protocol.
///
/// One-way methods return `Result<(), fidl::Error>` immediately; two-way
/// methods return an associated future type that resolves to the decoded
/// response.
pub trait NodeProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client proxy for the `Node` protocol; compiled only
/// for Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct NodeSynchronousProxy {
    client: fidl::client::sync::Client,
}
12327
// Conversions between the synchronous proxy and its raw zircon channel.
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for NodeSynchronousProxy {
    type Proxy = NodeProxy;
    type Protocol = NodeMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
12345
12346#[cfg(target_os = "fuchsia")]
12347impl NodeSynchronousProxy {
12348    pub fn new(channel: fidl::Channel) -> Self {
12349        let protocol_name = <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
12350        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
12351    }
12352
12353    pub fn into_channel(self) -> fidl::Channel {
12354        self.client.into_channel()
12355    }
12356
12357    /// Waits until an event arrives and returns it. It is safe for other
12358    /// threads to make concurrent requests while waiting for an event.
12359    pub fn wait_for_event(&self, deadline: zx::MonotonicInstant) -> Result<NodeEvent, fidl::Error> {
12360        NodeEvent::decode(self.client.wait_for_event(deadline)?)
12361    }
12362
12363    /// Ensure that previous messages have been received server side. This is
12364    /// particularly useful after previous messages that created new tokens,
12365    /// because a token must be known to the sysmem server before sending the
12366    /// token to another participant.
12367    ///
12368    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12369    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12370    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12371    /// to mitigate the possibility of a hostile/fake
12372    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12373    /// Another way is to pass the token to
12374    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
12375    /// the token as part of exchanging it for a
12376    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12377    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12378    /// of stalling.
12379    ///
12380    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12381    /// and then starting and completing a `Sync`, it's then safe to send the
12382    /// `BufferCollectionToken` client ends to other participants knowing the
12383    /// server will recognize the tokens when they're sent by the other
12384    /// participants to sysmem in a
12385    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
12386    /// efficient way to create tokens while avoiding unnecessary round trips.
12387    ///
12388    /// Other options include waiting for each
12389    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
12390    /// individually (using separate call to `Sync` after each), or calling
12391    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
12392    /// converted to a `BufferCollection` via
12393    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
12394    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
12395    /// the sync step and can create multiple tokens at once.
12396    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
12397        let _response = self.client.send_query::<
12398            fidl::encoding::EmptyPayload,
12399            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
12400        >(
12401            (),
12402            0x11ac2555cf575b54,
12403            fidl::encoding::DynamicFlags::FLEXIBLE,
12404            ___deadline,
12405        )?
12406        .into_result::<NodeMarker>("sync")?;
12407        Ok(_response)
12408    }
12409
12410    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
12411    ///
12412    /// Normally a participant will convert a `BufferCollectionToken` into a
12413    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
12414    /// `Release` via the token (and then close the channel immediately or
12415    /// shortly later in response to server closing the server end), which
12416    /// avoids causing buffer collection failure. Without a prior `Release`,
12417    /// closing the `BufferCollectionToken` client end will cause buffer
12418    /// collection failure.
12419    ///
12420    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
12421    ///
12422    /// By default the server handles unexpected closure of a
12423    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
12424    /// first) by failing the buffer collection. Partly this is to expedite
12425    /// closing VMO handles to reclaim memory when any participant fails. If a
12426    /// participant would like to cleanly close a `BufferCollection` without
12427    /// causing buffer collection failure, the participant can send `Release`
12428    /// before closing the `BufferCollection` client end. The `Release` can
12429    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
12430    /// buffer collection won't require constraints from this node in order to
12431    /// allocate. If after `SetConstraints`, the constraints are retained and
12432    /// aggregated, despite the lack of `BufferCollection` connection at the
12433    /// time of constraints aggregation.
12434    ///
12435    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
12436    ///
12437    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
12438    /// end (without `Release` first) will trigger failure of the buffer
12439    /// collection. To close a `BufferCollectionTokenGroup` channel without
12440    /// failing the buffer collection, ensure that AllChildrenPresent() has been
12441    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
12442    /// client end.
12443    ///
12444    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
12446    /// buffer collection will fail (triggered by reception of `Release` without
12447    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
12448    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
12449    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
12450    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
12451    /// close requires `AllChildrenPresent` (if not already sent), then
12452    /// `Release`, then close client end.
12453    ///
12454    /// If `Release` occurs after `AllChildrenPresent`, the children and all
12455    /// their constraints remain intact (just as they would if the
12456    /// `BufferCollectionTokenGroup` channel had remained open), and the client
12457    /// end close doesn't trigger buffer collection failure.
12458    ///
12459    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
12460    ///
12461    /// For brevity, the per-channel-protocol paragraphs above ignore the
12462    /// separate failure domain created by
12463    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
12464    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
12465    /// unexpectedly closes (without `Release` first) and that client end is
12466    /// under a failure domain, instead of failing the whole buffer collection,
12467    /// the failure domain is failed, but the buffer collection itself is
12468    /// isolated from failure of the failure domain. Such failure domains can be
12469    /// nested, in which case only the inner-most failure domain in which the
12470    /// `Node` resides fails.
12471    pub fn r#release(&self) -> Result<(), fidl::Error> {
12472        self.client.send::<fidl::encoding::EmptyPayload>(
12473            (),
12474            0x6a5cae7d6d6e04c6,
12475            fidl::encoding::DynamicFlags::FLEXIBLE,
12476        )
12477    }
12478
12479    /// Set a name for VMOs in this buffer collection.
12480    ///
12481    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
12482    /// will be truncated to fit. The name of the vmo will be suffixed with the
12483    /// buffer index within the collection (if the suffix fits within
12484    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
12485    /// listed in the inspect data.
12486    ///
12487    /// The name only affects VMOs allocated after the name is set; this call
12488    /// does not rename existing VMOs. If multiple clients set different names
12489    /// then the larger priority value will win. Setting a new name with the
12490    /// same priority as a prior name doesn't change the name.
12491    ///
12492    /// All table fields are currently required.
12493    ///
12494    /// + request `priority` The name is only set if this is the first `SetName`
12495    ///   or if `priority` is greater than any previous `priority` value in
12496    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
12497    /// + request `name` The name for VMOs created under this buffer collection.
12498    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
12499        self.client.send::<NodeSetNameRequest>(
12500            payload,
12501            0xb41f1624f48c1e9,
12502            fidl::encoding::DynamicFlags::FLEXIBLE,
12503        )
12504    }
12505
12506    /// Set information about the current client that can be used by sysmem to
12507    /// help diagnose leaking memory and allocation stalls waiting for a
12508    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
12509    ///
12510    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
12512    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
12513    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
12514    ///
12515    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
12516    /// `Allocator` is the most efficient way to ensure that all
12517    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
12518    /// set, and is also more efficient than separately sending the same debug
12519    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
12520    /// created [`fuchsia.sysmem2/Node`].
12521    ///
12522    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
12523    /// indicate which client is closing their channel first, leading to subtree
12524    /// failure (which can be normal if the purpose of the subtree is over, but
12525    /// if happening earlier than expected, the client-channel-specific name can
12526    /// help diagnose where the failure is first coming from, from sysmem's
12527    /// point of view).
12528    ///
12529    /// All table fields are currently required.
12530    ///
12531    /// + request `name` This can be an arbitrary string, but the current
12532    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
12533    /// + request `id` This can be an arbitrary id, but the current process ID
12534    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
12535    pub fn r#set_debug_client_info(
12536        &self,
12537        mut payload: &NodeSetDebugClientInfoRequest,
12538    ) -> Result<(), fidl::Error> {
12539        self.client.send::<NodeSetDebugClientInfoRequest>(
12540            payload,
12541            0x5cde8914608d99b1,
12542            fidl::encoding::DynamicFlags::FLEXIBLE,
12543        )
12544    }
12545
12546    /// Sysmem logs a warning if sysmem hasn't seen
12547    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
12548    /// within 5 seconds after creation of a new collection.
12549    ///
12550    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
12552    /// take effect.
12553    ///
12554    /// In most cases the default works well.
12555    ///
12556    /// All table fields are currently required.
12557    ///
12558    /// + request `deadline` The time at which sysmem will start trying to log
12559    ///   the warning, unless all constraints are with sysmem by then.
12560    pub fn r#set_debug_timeout_log_deadline(
12561        &self,
12562        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
12563    ) -> Result<(), fidl::Error> {
12564        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
12565            payload,
12566            0x716b0af13d5c0806,
12567            fidl::encoding::DynamicFlags::FLEXIBLE,
12568        )
12569    }
12570
12571    /// This enables verbose logging for the buffer collection.
12572    ///
12573    /// Verbose logging includes constraints set via
12574    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
12575    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
12576    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
12577    /// the tree of `Node`(s).
12578    ///
12579    /// Normally sysmem prints only a single line complaint when aggregation
12580    /// fails, with just the specific detailed reason that aggregation failed,
12581    /// with little surrounding context.  While this is often enough to diagnose
12582    /// a problem if only a small change was made and everything was working
12583    /// before the small change, it's often not particularly helpful for getting
12584    /// a new buffer collection to work for the first time.  Especially with
12585    /// more complex trees of nodes, involving things like
12586    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
12587    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
12588    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
12589    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
12590    /// looks like and why it's failing a logical allocation, or why a tree or
12591    /// subtree is failing sooner than expected.
12592    ///
12593    /// The intent of the extra logging is to be acceptable from a performance
12594    /// point of view, under the assumption that verbose logging is only enabled
12595    /// on a low number of buffer collections. If we're not tracking down a bug,
12596    /// we shouldn't send this message.
    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        // One-way call with an empty request payload; only the ordinal and
        // dynamic flags are sent on the wire.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12604
12605    /// This gets a handle that can be used as a parameter to
12606    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
12607    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
12608    /// client obtained this handle from this `Node`.
12609    ///
12610    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
12611    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
12612    /// despite the two calls typically being on different channels.
12613    ///
12614    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
12615    ///
12616    /// All table fields are currently required.
12617    ///
12618    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
12619    ///   different `Node` channel, to prove that the client obtained the handle
12620    ///   from this `Node`.
    pub fn r#get_node_ref(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
        // Two-way call: blocks until the response arrives or `___deadline`
        // expires. The response is decoded as a `FlexibleType` envelope;
        // `into_result` unwraps it (surfacing unknown-method outcomes as
        // `fidl::Error`).
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
        >(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<NodeMarker>("get_node_ref")?;
        Ok(_response)
    }
12637
12638    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
12639    /// rooted at a different child token of a common parent
12640    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
12641    /// passed-in `node_ref`.
12642    ///
12643    /// This call is for assisting with admission control de-duplication, and
12644    /// with debugging.
12645    ///
12646    /// The `node_ref` must be obtained using
12647    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
12648    ///
12649    /// The `node_ref` can be a duplicated handle; it's not necessary to call
12650    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
12651    ///
12652    /// If a calling token may not actually be a valid token at all due to a
12653    /// potentially hostile/untrusted provider of the token, call
12654    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
12655    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
12656    /// never responds due to a calling token not being a real token (not really
12657    /// talking to sysmem).  Another option is to call
12658    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
12659    /// which also validates the token along with converting it to a
12660    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
12661    ///
12662    /// All table fields are currently required.
12663    ///
12664    /// - response `is_alternate`
12665    ///   - true: The first parent node in common between the calling node and
12666    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
12667    ///     that the calling `Node` and the `node_ref` `Node` will not have both
12668    ///     their constraints apply - rather sysmem will choose one or the other
12669    ///     of the constraints - never both.  This is because only one child of
12670    ///     a `BufferCollectionTokenGroup` is selected during logical
12671    ///     allocation, with only that one child's subtree contributing to
12672    ///     constraints aggregation.
12673    ///   - false: The first parent node in common between the calling `Node`
12674    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
12675    ///     Currently, this means the first parent node in common is a
12676    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
12677    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
12678    ///     `Node` may have both their constraints apply during constraints
12679    ///     aggregation of the logical allocation, if both `Node`(s) are
12680    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
12681    ///     this case, there is no `BufferCollectionTokenGroup` that will
12682    ///     directly prevent the two `Node`(s) from both being selected and
12683    ///     their constraints both aggregated, but even when false, one or both
12684    ///     `Node`(s) may still be eliminated from consideration if one or both
12685    ///     `Node`(s) has a direct or indirect parent
12686    ///     `BufferCollectionTokenGroup` which selects a child subtree other
12687    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
12688    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
12689    ///   associated with the same buffer collection as the calling `Node`.
12690    ///   Another reason for this error is if the `node_ref` is an
12691    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
12692    ///   a real `node_ref` obtained from `GetNodeRef`.
12693    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
12695    ///   the needed rights expected on a real `node_ref`.
12696    /// * No other failing status codes are returned by this call.  However,
12697    ///   sysmem may add additional codes in future, so the client should have
12698    ///   sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
        // Two-way call with an application-level error domain: the decoded
        // response is a `FlexibleResultType`, so after the transport-level
        // `Result` is unwrapped the value is itself a `Result<_, Error>`.
        let _response = self.client.send_query::<
            NodeIsAlternateForRequest,
            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
        >(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<NodeMarker>("is_alternate_for")?;
        // Generated identity map: passes the success/error structure through
        // unchanged.
        Ok(_response.map(|x| x))
    }
12716
12717    /// Get the buffer collection ID. This ID is also available from
12718    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
12719    /// within the collection).
12720    ///
12721    /// This call is mainly useful in situations where we can't convey a
12722    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
12723    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
12724    /// handle, which can be joined back up with a `BufferCollection` client end
12725    /// that was created via a different path. Prefer to convey a
12726    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
12727    ///
12728    /// Trusting a `buffer_collection_id` value from a source other than sysmem
12729    /// is analogous to trusting a koid value from a source other than zircon.
12730    /// Both should be avoided unless really necessary, and both require
12731    /// caution. In some situations it may be reasonable to refer to a
12732    /// pre-established `BufferCollection` by `buffer_collection_id` via a
12733    /// protocol for efficiency reasons, but an incoming value purporting to be
12734    /// a `buffer_collection_id` is not sufficient alone to justify granting the
12735    /// sender of the `buffer_collection_id` any capability. The sender must
12736    /// first prove to a receiver that the sender has/had a VMO or has/had a
12737    /// `BufferCollectionToken` to the same collection by sending a handle that
12738    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
12739    /// `buffer_collection_id` value. The receiver should take care to avoid
12740    /// assuming that a sender had a `BufferCollectionToken` in cases where the
12741    /// sender has only proven that the sender had a VMO.
12742    ///
12743    /// - response `buffer_collection_id` This ID is unique per buffer
12744    ///   collection per boot. Each buffer is uniquely identified by the
12745    ///   `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
        // Two-way call: empty request, blocks until the response arrives or
        // `___deadline` expires, then unwraps the flexible response envelope.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<NodeMarker>("get_buffer_collection_id")?;
        Ok(_response)
    }
12762
12763    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
12764    /// created after this message to weak, which means that a client's `Node`
12765    /// client end (or a child created after this message) is not alone
12766    /// sufficient to keep allocated VMOs alive.
12767    ///
12768    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
12769    /// `close_weak_asap`.
12770    ///
12771    /// This message is only permitted before the `Node` becomes ready for
12772    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
12773    ///   * `BufferCollectionToken`: any time
12774    ///   * `BufferCollection`: before `SetConstraints`
12775    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
12776    ///
12777    /// Currently, no conversion from strong `Node` to weak `Node` after ready
12778    /// for allocation is provided, but a client can simulate that by creating
12779    /// an additional `Node` before allocation and setting that additional
12780    /// `Node` to weak, and then potentially at some point later sending
12781    /// `Release` and closing the client end of the client's strong `Node`, but
12782    /// keeping the client's weak `Node`.
12783    ///
12784    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
12785    /// collection failure (all `Node` client end(s) will see
12786    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
12787    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
12788    /// this situation until all `Node`(s) are ready for allocation. For initial
12789    /// allocation to succeed, at least one strong `Node` is required to exist
12790    /// at allocation time, but after that client receives VMO handles, that
12791    /// client can `BufferCollection.Release` and close the client end without
12792    /// causing this type of failure.
12793    ///
12794    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
12795    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
12796    /// separately as appropriate.
    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
        // One-way call with an empty request payload; no reply is awaited.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12804
12805    /// This indicates to sysmem that the client is prepared to pay attention to
12806    /// `close_weak_asap`.
12807    ///
12808    /// If sent, this message must be before
12809    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
12810    ///
12811    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
12812    /// send this message before `WaitForAllBuffersAllocated`, or a parent
12813    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
12814    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
12815    /// trigger buffer collection failure.
12816    ///
12817    /// This message is necessary because weak sysmem VMOs have not always been
12818    /// a thing, so older clients are not aware of the need to pay attention to
12819    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
12820    /// sysmem weak VMO handles asap. By having this message and requiring
12821    /// participants to indicate their acceptance of this aspect of the overall
12822    /// protocol, we avoid situations where an older client is delivered a weak
12823    /// VMO without any way for sysmem to get that VMO to close quickly later
12824    /// (and on a per-buffer basis).
12825    ///
12826    /// A participant that doesn't handle `close_weak_asap` and also doesn't
12827    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
12828    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
12829    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
12830    /// same participant has a child/delegate which does retrieve VMOs, that
12831    /// child/delegate will need to send `SetWeakOk` before
12832    /// `WaitForAllBuffersAllocated`.
12833    ///
12834    /// + request `for_child_nodes_also` If present and true, this means direct
12835    ///   child nodes of this node created after this message plus all
12836    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
12837    ///   those nodes. Any child node of this node that was created before this
12838    ///   message is not included. This setting is "sticky" in the sense that a
12839    ///   subsequent `SetWeakOk` without this bool set to true does not reset
12840    ///   the server-side bool. If this creates a problem for a participant, a
12841    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
12842    ///   tokens instead, as appropriate. A participant should only set
12843    ///   `for_child_nodes_also` true if the participant can really promise to
12844    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
12845    ///   weak VMO handles held by participants holding the corresponding child
12846    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
12847    ///   which are using sysmem(1) can be weak, despite the clients of those
12848    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
12849    ///   direct way to find out about `close_weak_asap`. This only applies to
12850    ///   descendents of this `Node` which are using sysmem(1), not to this
12851    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
12852    ///   token, which will fail allocation unless an ancestor of this `Node`
12853    ///   specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // One-way call; the request is taken by value and encoded via a
        // mutable borrow (encoding may consume resources held by the table).
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12861
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
12864    /// reservation by a different `Node` via
12865    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
12866    ///
12867    /// The `Node` buffer counts may not be released until the entire tree of
12868    /// `Node`(s) is closed or failed, because
12869    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
12870    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
12871    /// `Node` buffer counts remain reserved until the orphaned node is later
12872    /// cleaned up.
12873    ///
12874    /// If the `Node` exceeds a fairly large number of attached eventpair server
12875    /// ends, a log message will indicate this and the `Node` (and the
12876    /// appropriate) sub-tree will fail.
12877    ///
12878    /// The `server_end` will remain open when
12879    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
12880    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
12881    /// [`fuchsia.sysmem2/BufferCollection`].
12882    ///
12883    /// This message can also be used with a
12884    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call; the request is taken by value and encoded via a
        // mutable borrow so the contained server_end handle can be moved out.
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12895}
12896
#[cfg(target_os = "fuchsia")]
impl From<NodeSynchronousProxy> for zx::NullableHandle {
    // Consumes the proxy and converts its underlying channel into a
    // generic (nullable) handle.
    fn from(value: NodeSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}
12903
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for NodeSynchronousProxy {
    // Wraps a raw channel in a synchronous Node proxy.
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}
12910
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for NodeSynchronousProxy {
    type Protocol = NodeMarker;

    // Builds a synchronous proxy from a typed client end by extracting the
    // underlying channel.
    fn from_client(value: fidl::endpoints::ClientEnd<NodeMarker>) -> Self {
        Self::new(value.into_channel())
    }
}
12919
/// Asynchronous client proxy for the `fuchsia.sysmem2/Node` protocol.
#[derive(Debug, Clone)]
pub struct NodeProxy {
    // Generic async FIDL client bound to the default Fuchsia resource dialect.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
12924
impl fidl::endpoints::Proxy for NodeProxy {
    type Protocol = NodeMarker;

    // Wraps an async channel in a new proxy.
    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    // Recovers the underlying channel; if the inner client refuses the
    // conversion, the proxy is reconstructed and returned as the error.
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    // Borrows the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
12940
12941impl NodeProxy {
    /// Create a new Proxy for fuchsia.sysmem2/Node.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        // The protocol's DEBUG_NAME is attached to the client for diagnostics.
        let protocol_name = <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }
12947
    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> NodeEventStream {
        // The underlying event receiver can only be taken once; per the
        // documented contract above, a repeat call panics.
        NodeEventStream { event_receiver: self.client.take_event_receiver() }
    }
12956
12957    /// Ensure that previous messages have been received server side. This is
12958    /// particularly useful after previous messages that created new tokens,
12959    /// because a token must be known to the sysmem server before sending the
12960    /// token to another participant.
12961    ///
12962    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12963    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12964    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12965    /// to mitigate the possibility of a hostile/fake
12966    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12967    /// Another way is to pass the token to
12968    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
12969    /// the token as part of exchanging it for a
12970    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12971    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12972    /// of stalling.
12973    ///
12974    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12975    /// and then starting and completing a `Sync`, it's then safe to send the
12976    /// `BufferCollectionToken` client ends to other participants knowing the
12977    /// server will recognize the tokens when they're sent by the other
12978    /// participants to sysmem in a
12979    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
12980    /// efficient way to create tokens while avoiding unnecessary round trips.
12981    ///
12982    /// Other options include waiting for each
12983    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
12984    /// individually (using separate call to `Sync` after each), or calling
12985    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
12986    /// converted to a `BufferCollection` via
12987    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
12988    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
12989    /// the sync step and can create multiple tokens at once.
    pub fn r#sync(
        &self,
    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
        // Delegates to the generated `NodeProxyInterface` impl; returns a
        // future that resolves when the server's empty reply arrives.
        NodeProxyInterface::r#sync(self)
    }
12995
12996    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
12997    ///
12998    /// Normally a participant will convert a `BufferCollectionToken` into a
12999    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
13000    /// `Release` via the token (and then close the channel immediately or
13001    /// shortly later in response to server closing the server end), which
13002    /// avoids causing buffer collection failure. Without a prior `Release`,
13003    /// closing the `BufferCollectionToken` client end will cause buffer
13004    /// collection failure.
13005    ///
13006    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
13007    ///
13008    /// By default the server handles unexpected closure of a
13009    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
13010    /// first) by failing the buffer collection. Partly this is to expedite
13011    /// closing VMO handles to reclaim memory when any participant fails. If a
13012    /// participant would like to cleanly close a `BufferCollection` without
13013    /// causing buffer collection failure, the participant can send `Release`
13014    /// before closing the `BufferCollection` client end. The `Release` can
13015    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
13016    /// buffer collection won't require constraints from this node in order to
13017    /// allocate. If after `SetConstraints`, the constraints are retained and
13018    /// aggregated, despite the lack of `BufferCollection` connection at the
13019    /// time of constraints aggregation.
13020    ///
13021    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
13022    ///
13023    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
13024    /// end (without `Release` first) will trigger failure of the buffer
13025    /// collection. To close a `BufferCollectionTokenGroup` channel without
13026    /// failing the buffer collection, ensure that AllChildrenPresent() has been
13027    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
13028    /// client end.
13029    ///
13030    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
13032    /// buffer collection will fail (triggered by reception of `Release` without
13033    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
13034    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
13035    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
13036    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
13037    /// close requires `AllChildrenPresent` (if not already sent), then
13038    /// `Release`, then close client end.
13039    ///
13040    /// If `Release` occurs after `AllChildrenPresent`, the children and all
13041    /// their constraints remain intact (just as they would if the
13042    /// `BufferCollectionTokenGroup` channel had remained open), and the client
13043    /// end close doesn't trigger buffer collection failure.
13044    ///
13045    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
13046    ///
13047    /// For brevity, the per-channel-protocol paragraphs above ignore the
13048    /// separate failure domain created by
13049    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
13050    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
13051    /// unexpectedly closes (without `Release` first) and that client end is
13052    /// under a failure domain, instead of failing the whole buffer collection,
13053    /// the failure domain is failed, but the buffer collection itself is
13054    /// isolated from failure of the failure domain. Such failure domains can be
13055    /// nested, in which case only the inner-most failure domain in which the
13056    /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way call; delegates to the generated trait implementation.
        NodeProxyInterface::r#release(self)
    }
13060
13061    /// Set a name for VMOs in this buffer collection.
13062    ///
13063    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
13064    /// will be truncated to fit. The name of the vmo will be suffixed with the
13065    /// buffer index within the collection (if the suffix fits within
13066    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
13067    /// listed in the inspect data.
13068    ///
13069    /// The name only affects VMOs allocated after the name is set; this call
13070    /// does not rename existing VMOs. If multiple clients set different names
13071    /// then the larger priority value will win. Setting a new name with the
13072    /// same priority as a prior name doesn't change the name.
13073    ///
13074    /// All table fields are currently required.
13075    ///
13076    /// + request `priority` The name is only set if this is the first `SetName`
13077    ///   or if `priority` is greater than any previous `priority` value in
13078    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
13079    /// + request `name` The name for VMOs created under this buffer collection.
    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        // One-way call; delegates to the generated trait implementation.
        NodeProxyInterface::r#set_name(self, payload)
    }
13083
13084    /// Set information about the current client that can be used by sysmem to
13085    /// help diagnose leaking memory and allocation stalls waiting for a
13086    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
13087    ///
13088    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
13090    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
13091    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
13092    ///
13093    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
13094    /// `Allocator` is the most efficient way to ensure that all
13095    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
13096    /// set, and is also more efficient than separately sending the same debug
13097    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
13098    /// created [`fuchsia.sysmem2/Node`].
13099    ///
13100    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
13101    /// indicate which client is closing their channel first, leading to subtree
13102    /// failure (which can be normal if the purpose of the subtree is over, but
13103    /// if happening earlier than expected, the client-channel-specific name can
13104    /// help diagnose where the failure is first coming from, from sysmem's
13105    /// point of view).
13106    ///
13107    /// All table fields are currently required.
13108    ///
13109    /// + request `name` This can be an arbitrary string, but the current
13110    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
13111    /// + request `id` This can be an arbitrary id, but the current process ID
13112    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call; delegates to the generated trait implementation.
        NodeProxyInterface::r#set_debug_client_info(self, payload)
    }
13119
13120    /// Sysmem logs a warning if sysmem hasn't seen
13121    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
13122    /// within 5 seconds after creation of a new collection.
13123    ///
13124    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
13126    /// take effect.
13127    ///
13128    /// In most cases the default works well.
13129    ///
13130    /// All table fields are currently required.
13131    ///
13132    /// + request `deadline` The time at which sysmem will start trying to log
13133    ///   the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call; delegates to the generated trait implementation.
        NodeProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
    }
13140
13141    /// This enables verbose logging for the buffer collection.
13142    ///
13143    /// Verbose logging includes constraints set via
13144    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
13145    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
13146    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
13147    /// the tree of `Node`(s).
13148    ///
13149    /// Normally sysmem prints only a single line complaint when aggregation
13150    /// fails, with just the specific detailed reason that aggregation failed,
13151    /// with little surrounding context.  While this is often enough to diagnose
13152    /// a problem if only a small change was made and everything was working
13153    /// before the small change, it's often not particularly helpful for getting
13154    /// a new buffer collection to work for the first time.  Especially with
13155    /// more complex trees of nodes, involving things like
13156    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
13157    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
13158    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
13159    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
13160    /// looks like and why it's failing a logical allocation, or why a tree or
13161    /// subtree is failing sooner than expected.
13162    ///
13163    /// The intent of the extra logging is to be acceptable from a performance
13164    /// point of view, under the assumption that verbose logging is only enabled
13165    /// on a low number of buffer collections. If we're not tracking down a bug,
13166    /// we shouldn't send this message.
    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        // One-way call with an empty payload; see the trait impl below for the
        // wire ordinal.
        NodeProxyInterface::r#set_verbose_logging(self)
    }
13170
13171    /// This gets a handle that can be used as a parameter to
13172    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
13173    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
13174    /// client obtained this handle from this `Node`.
13175    ///
13176    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
13177    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
13178    /// despite the two calls typically being on different channels.
13179    ///
13180    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
13181    ///
13182    /// All table fields are currently required.
13183    ///
13184    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
13185    ///   different `Node` channel, to prove that the client obtained the handle
13186    ///   from this `Node`.
    pub fn r#get_node_ref(
        &self,
    ) -> fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call with an empty request; the returned future resolves
        // with the decoded `NodeGetNodeRefResponse` once the server replies.
        NodeProxyInterface::r#get_node_ref(self)
    }
13195
13196    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
13197    /// rooted at a different child token of a common parent
13198    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
13199    /// passed-in `node_ref`.
13200    ///
13201    /// This call is for assisting with admission control de-duplication, and
13202    /// with debugging.
13203    ///
13204    /// The `node_ref` must be obtained using
13205    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
13206    ///
13207    /// The `node_ref` can be a duplicated handle; it's not necessary to call
13208    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
13209    ///
13210    /// If a calling token may not actually be a valid token at all due to a
13211    /// potentially hostile/untrusted provider of the token, call
13212    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
13213    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
13214    /// never responds due to a calling token not being a real token (not really
13215    /// talking to sysmem).  Another option is to call
13216    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
13217    /// which also validates the token along with converting it to a
13218    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
13219    ///
13220    /// All table fields are currently required.
13221    ///
13222    /// - response `is_alternate`
13223    ///   - true: The first parent node in common between the calling node and
13224    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
13225    ///     that the calling `Node` and the `node_ref` `Node` will not have both
13226    ///     their constraints apply - rather sysmem will choose one or the other
13227    ///     of the constraints - never both.  This is because only one child of
13228    ///     a `BufferCollectionTokenGroup` is selected during logical
13229    ///     allocation, with only that one child's subtree contributing to
13230    ///     constraints aggregation.
13231    ///   - false: The first parent node in common between the calling `Node`
13232    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
13233    ///     Currently, this means the first parent node in common is a
13234    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
13235    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
13236    ///     `Node` may have both their constraints apply during constraints
13237    ///     aggregation of the logical allocation, if both `Node`(s) are
13238    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
13239    ///     this case, there is no `BufferCollectionTokenGroup` that will
13240    ///     directly prevent the two `Node`(s) from both being selected and
13241    ///     their constraints both aggregated, but even when false, one or both
13242    ///     `Node`(s) may still be eliminated from consideration if one or both
13243    ///     `Node`(s) has a direct or indirect parent
13244    ///     `BufferCollectionTokenGroup` which selects a child subtree other
13245    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
13246    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
13247    ///   associated with the same buffer collection as the calling `Node`.
13248    ///   Another reason for this error is if the `node_ref` is an
13249    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
13250    ///   a real `node_ref` obtained from `GetNodeRef`.
13251    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
13252    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle , or doesn't have
13253    ///   the needed rights expected on a real `node_ref`.
13254    /// * No other failing status codes are returned by this call.  However,
13255    ///   sysmem may add additional codes in future, so the client should have
13256    ///   sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way fallible call: the future resolves to the decoded result.
        // `payload` is taken by value (unlike the `&`-payload setters above)
        // because the request carries the `node_ref` handle, which is consumed
        // on send.
        NodeProxyInterface::r#is_alternate_for(self, payload)
    }
13266
13267    /// Get the buffer collection ID. This ID is also available from
13268    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
13269    /// within the collection).
13270    ///
13271    /// This call is mainly useful in situations where we can't convey a
13272    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
13273    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
13274    /// handle, which can be joined back up with a `BufferCollection` client end
13275    /// that was created via a different path. Prefer to convey a
13276    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
13277    ///
13278    /// Trusting a `buffer_collection_id` value from a source other than sysmem
13279    /// is analogous to trusting a koid value from a source other than zircon.
13280    /// Both should be avoided unless really necessary, and both require
13281    /// caution. In some situations it may be reasonable to refer to a
13282    /// pre-established `BufferCollection` by `buffer_collection_id` via a
13283    /// protocol for efficiency reasons, but an incoming value purporting to be
13284    /// a `buffer_collection_id` is not sufficient alone to justify granting the
13285    /// sender of the `buffer_collection_id` any capability. The sender must
13286    /// first prove to a receiver that the sender has/had a VMO or has/had a
13287    /// `BufferCollectionToken` to the same collection by sending a handle that
13288    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
13289    /// `buffer_collection_id` value. The receiver should take care to avoid
13290    /// assuming that a sender had a `BufferCollectionToken` in cases where the
13291    /// sender has only proven that the sender had a VMO.
13292    ///
13293    /// - response `buffer_collection_id` This ID is unique per buffer
13294    ///   collection per boot. Each buffer is uniquely identified by the
13295    ///   `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
    ) -> fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call with an empty request; resolves with the collection-ID
        // response once the server replies.
        NodeProxyInterface::r#get_buffer_collection_id(self)
    }
13304
13305    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
13306    /// created after this message to weak, which means that a client's `Node`
13307    /// client end (or a child created after this message) is not alone
13308    /// sufficient to keep allocated VMOs alive.
13309    ///
13310    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
13311    /// `close_weak_asap`.
13312    ///
13313    /// This message is only permitted before the `Node` becomes ready for
13314    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
13315    ///   * `BufferCollectionToken`: any time
13316    ///   * `BufferCollection`: before `SetConstraints`
13317    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
13318    ///
13319    /// Currently, no conversion from strong `Node` to weak `Node` after ready
13320    /// for allocation is provided, but a client can simulate that by creating
13321    /// an additional `Node` before allocation and setting that additional
13322    /// `Node` to weak, and then potentially at some point later sending
13323    /// `Release` and closing the client end of the client's strong `Node`, but
13324    /// keeping the client's weak `Node`.
13325    ///
13326    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
13327    /// collection failure (all `Node` client end(s) will see
13328    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
13329    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
13330    /// this situation until all `Node`(s) are ready for allocation. For initial
13331    /// allocation to succeed, at least one strong `Node` is required to exist
13332    /// at allocation time, but after that client receives VMO handles, that
13333    /// client can `BufferCollection.Release` and close the client end without
13334    /// causing this type of failure.
13335    ///
13336    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
13337    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
13338    /// separately as appropriate.
    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
        // One-way call with an empty payload; see the trait impl below for the
        // wire ordinal.
        NodeProxyInterface::r#set_weak(self)
    }
13342
13343    /// This indicates to sysmem that the client is prepared to pay attention to
13344    /// `close_weak_asap`.
13345    ///
13346    /// If sent, this message must be before
13347    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
13348    ///
13349    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
13350    /// send this message before `WaitForAllBuffersAllocated`, or a parent
13351    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
13352    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
13353    /// trigger buffer collection failure.
13354    ///
13355    /// This message is necessary because weak sysmem VMOs have not always been
13356    /// a thing, so older clients are not aware of the need to pay attention to
13357    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
13358    /// sysmem weak VMO handles asap. By having this message and requiring
13359    /// participants to indicate their acceptance of this aspect of the overall
13360    /// protocol, we avoid situations where an older client is delivered a weak
13361    /// VMO without any way for sysmem to get that VMO to close quickly later
13362    /// (and on a per-buffer basis).
13363    ///
13364    /// A participant that doesn't handle `close_weak_asap` and also doesn't
13365    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
13366    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
13367    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
13368    /// same participant has a child/delegate which does retrieve VMOs, that
13369    /// child/delegate will need to send `SetWeakOk` before
13370    /// `WaitForAllBuffersAllocated`.
13371    ///
13372    /// + request `for_child_nodes_also` If present and true, this means direct
13373    ///   child nodes of this node created after this message plus all
13374    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
13375    ///   those nodes. Any child node of this node that was created before this
13376    ///   message is not included. This setting is "sticky" in the sense that a
13377    ///   subsequent `SetWeakOk` without this bool set to true does not reset
13378    ///   the server-side bool. If this creates a problem for a participant, a
13379    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
13380    ///   tokens instead, as appropriate. A participant should only set
13381    ///   `for_child_nodes_also` true if the participant can really promise to
13382    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
13383    ///   weak VMO handles held by participants holding the corresponding child
13384    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
13385    ///   which are using sysmem(1) can be weak, despite the clients of those
13386    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
13387    ///   direct way to find out about `close_weak_asap`. This only applies to
13388    ///   descendents of this `Node` which are using sysmem(1), not to this
13389    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
13390    ///   token, which will fail allocation unless an ancestor of this `Node`
13391    ///   specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // One-way call. `payload` is taken by value and consumed on send,
        // unlike the borrowed-payload setters above — presumably because the
        // request is a resource table; confirm against the FIDL definition.
        NodeProxyInterface::r#set_weak_ok(self, payload)
    }
13395
13396    /// The server_end will be closed after this `Node` and any child nodes have
13397    /// have released their buffer counts, making those counts available for
13398    /// reservation by a different `Node` via
13399    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
13400    ///
13401    /// The `Node` buffer counts may not be released until the entire tree of
13402    /// `Node`(s) is closed or failed, because
13403    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
13404    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
13405    /// `Node` buffer counts remain reserved until the orphaned node is later
13406    /// cleaned up.
13407    ///
13408    /// If the `Node` exceeds a fairly large number of attached eventpair server
13409    /// ends, a log message will indicate this and the `Node` (and the
13410    /// appropriate) sub-tree will fail.
13411    ///
13412    /// The `server_end` will remain open when
13413    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
13414    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
13415    /// [`fuchsia.sysmem2/BufferCollection`].
13416    ///
13417    /// This message can also be used with a
13418    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call; `payload` carries the eventpair `server_end` described
        // above and is consumed on send.
        NodeProxyInterface::r#attach_node_tracking(self, payload)
    }
13425}
13426
// Wire-level client implementation of the `Node` protocol for `NodeProxy`.
// One-way methods encode and send a single message with no reply; two-way
// methods send a query and return a future that decodes the reply. Every
// message is sent with `DynamicFlags::FLEXIBLE` (open protocol) and is
// identified by the method's 64-bit ordinal.
impl NodeProxyInterface for NodeProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Nested helper handed to `send_query_and_decode`: decodes the empty
        // `sync` response body, unwrapping the flexible envelope for this
        // ordinal.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<NodeMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way message; no reply is expected.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        // Decodes the `get_node_ref` response table for this ordinal.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<NodeMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        // Fallible two-way method: the body decodes to a `Result` via
        // `FlexibleResultType` (success table or domain `Error`).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<NodeMarker>("is_alternate_for")?;
            // Identity map: the decoded success value already is the result type.
            Ok(_response.map(|x| x))
        }
        // Request is encoded via `&mut` so its contents can be moved out.
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<NodeMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // Taken by value and encoded via `&mut`, unlike the borrowed-payload
    // setters above.
    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
13601
/// Stream of events arriving on a `Node` client channel.
pub struct NodeEventStream {
    // Receives raw message buffers from the underlying channel; each buffer is
    // decoded into a `NodeEvent` by the `Stream` impl below.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
13605
13606impl std::marker::Unpin for NodeEventStream {}
13607
impl futures::stream::FusedStream for NodeEventStream {
    // Terminated exactly when the underlying event receiver is; after that the
    // stream yields no further items.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
13613
impl futures::Stream for NodeEventStream {
    type Item = Result<NodeEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // Poll the inner receiver; `ready!` propagates `Pending`, and the `?`
        // propagates receive errors as a `Some(Err(...))` item. A received
        // buffer is decoded into a `NodeEvent`; `None` means the channel
        // closed and the stream is finished.
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            Some(buf) => std::task::Poll::Ready(Some(NodeEvent::decode(buf))),
            None => std::task::Poll::Ready(None),
        }
    }
}
13630
/// An event delivered on the `Node` protocol. These bindings recognize no
/// event ordinals, so decoding only ever yields `_UnknownEvent` (for a
/// flexible event sent by a newer peer) or an error.
#[derive(Debug)]
pub enum NodeEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
13639
impl NodeEvent {
    /// Decodes a message buffer as a [`NodeEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<NodeEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events are unsolicited server-to-client messages, so the transaction
        // id must be 0.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // Every ordinal is unknown to these bindings; a FLEXIBLE-flagged
            // message is surfaced as `_UnknownEvent` (open-protocol behavior)
            // rather than treated as an error.
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(NodeEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            // A non-flexible message with an unrecognized ordinal is a
            // protocol violation.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
13659
/// A Stream of incoming requests for fuchsia.sysmem2/Node.
pub struct NodeRequestStream {
    // Shared server state (channel plus shutdown bookkeeping); control handles
    // created from this stream clone the same `Arc`.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has finished; polling after that panics (see the
    // `Stream` impl).
    is_terminated: bool,
}
13665
13666impl std::marker::Unpin for NodeRequestStream {}
13667
impl futures::stream::FusedStream for NodeRequestStream {
    // Reports the flag maintained by the `Stream` impl: true once the stream
    // has yielded `None`.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
13673
impl fidl::endpoints::RequestStream for NodeRequestStream {
    type Protocol = NodeMarker;
    type ControlHandle = NodeControlHandle;

    // Wraps an async channel in a fresh, non-terminated request stream.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    // Hands out a control handle sharing this stream's server state.
    fn control_handle(&self) -> Self::ControlHandle {
        NodeControlHandle { inner: self.inner.clone() }
    }

    // Deconstructs into the shared server state and the termination flag,
    // the inverse of `from_inner`.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    // Rebuilds a stream from parts previously obtained via `into_inner`.
    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
13700
13701impl futures::Stream for NodeRequestStream {
13702    type Item = Result<NodeRequest, fidl::Error>;
13703
13704    fn poll_next(
13705        mut self: std::pin::Pin<&mut Self>,
13706        cx: &mut std::task::Context<'_>,
13707    ) -> std::task::Poll<Option<Self::Item>> {
13708        let this = &mut *self;
13709        if this.inner.check_shutdown(cx) {
13710            this.is_terminated = true;
13711            return std::task::Poll::Ready(None);
13712        }
13713        if this.is_terminated {
13714            panic!("polled NodeRequestStream after completion");
13715        }
13716        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
13717            |bytes, handles| {
13718                match this.inner.channel().read_etc(cx, bytes, handles) {
13719                    std::task::Poll::Ready(Ok(())) => {}
13720                    std::task::Poll::Pending => return std::task::Poll::Pending,
13721                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
13722                        this.is_terminated = true;
13723                        return std::task::Poll::Ready(None);
13724                    }
13725                    std::task::Poll::Ready(Err(e)) => {
13726                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
13727                            e.into(),
13728                        ))));
13729                    }
13730                }
13731
13732                // A message has been received from the channel
13733                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
13734
13735                std::task::Poll::Ready(Some(match header.ordinal {
13736                    0x11ac2555cf575b54 => {
13737                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13738                        let mut req = fidl::new_empty!(
13739                            fidl::encoding::EmptyPayload,
13740                            fidl::encoding::DefaultFuchsiaResourceDialect
13741                        );
13742                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13743                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13744                        Ok(NodeRequest::Sync {
13745                            responder: NodeSyncResponder {
13746                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13747                                tx_id: header.tx_id,
13748                            },
13749                        })
13750                    }
13751                    0x6a5cae7d6d6e04c6 => {
13752                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13753                        let mut req = fidl::new_empty!(
13754                            fidl::encoding::EmptyPayload,
13755                            fidl::encoding::DefaultFuchsiaResourceDialect
13756                        );
13757                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13758                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13759                        Ok(NodeRequest::Release { control_handle })
13760                    }
13761                    0xb41f1624f48c1e9 => {
13762                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13763                        let mut req = fidl::new_empty!(
13764                            NodeSetNameRequest,
13765                            fidl::encoding::DefaultFuchsiaResourceDialect
13766                        );
13767                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
13768                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13769                        Ok(NodeRequest::SetName { payload: req, control_handle })
13770                    }
13771                    0x5cde8914608d99b1 => {
13772                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13773                        let mut req = fidl::new_empty!(
13774                            NodeSetDebugClientInfoRequest,
13775                            fidl::encoding::DefaultFuchsiaResourceDialect
13776                        );
13777                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
13778                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13779                        Ok(NodeRequest::SetDebugClientInfo { payload: req, control_handle })
13780                    }
13781                    0x716b0af13d5c0806 => {
13782                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13783                        let mut req = fidl::new_empty!(
13784                            NodeSetDebugTimeoutLogDeadlineRequest,
13785                            fidl::encoding::DefaultFuchsiaResourceDialect
13786                        );
13787                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
13788                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13789                        Ok(NodeRequest::SetDebugTimeoutLogDeadline { payload: req, control_handle })
13790                    }
13791                    0x5209c77415b4dfad => {
13792                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13793                        let mut req = fidl::new_empty!(
13794                            fidl::encoding::EmptyPayload,
13795                            fidl::encoding::DefaultFuchsiaResourceDialect
13796                        );
13797                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13798                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13799                        Ok(NodeRequest::SetVerboseLogging { control_handle })
13800                    }
13801                    0x5b3d0e51614df053 => {
13802                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13803                        let mut req = fidl::new_empty!(
13804                            fidl::encoding::EmptyPayload,
13805                            fidl::encoding::DefaultFuchsiaResourceDialect
13806                        );
13807                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13808                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13809                        Ok(NodeRequest::GetNodeRef {
13810                            responder: NodeGetNodeRefResponder {
13811                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13812                                tx_id: header.tx_id,
13813                            },
13814                        })
13815                    }
13816                    0x3a58e00157e0825 => {
13817                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13818                        let mut req = fidl::new_empty!(
13819                            NodeIsAlternateForRequest,
13820                            fidl::encoding::DefaultFuchsiaResourceDialect
13821                        );
13822                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
13823                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13824                        Ok(NodeRequest::IsAlternateFor {
13825                            payload: req,
13826                            responder: NodeIsAlternateForResponder {
13827                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13828                                tx_id: header.tx_id,
13829                            },
13830                        })
13831                    }
13832                    0x77d19a494b78ba8c => {
13833                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13834                        let mut req = fidl::new_empty!(
13835                            fidl::encoding::EmptyPayload,
13836                            fidl::encoding::DefaultFuchsiaResourceDialect
13837                        );
13838                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13839                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13840                        Ok(NodeRequest::GetBufferCollectionId {
13841                            responder: NodeGetBufferCollectionIdResponder {
13842                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13843                                tx_id: header.tx_id,
13844                            },
13845                        })
13846                    }
13847                    0x22dd3ea514eeffe1 => {
13848                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13849                        let mut req = fidl::new_empty!(
13850                            fidl::encoding::EmptyPayload,
13851                            fidl::encoding::DefaultFuchsiaResourceDialect
13852                        );
13853                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13854                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13855                        Ok(NodeRequest::SetWeak { control_handle })
13856                    }
13857                    0x38a44fc4d7724be9 => {
13858                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13859                        let mut req = fidl::new_empty!(
13860                            NodeSetWeakOkRequest,
13861                            fidl::encoding::DefaultFuchsiaResourceDialect
13862                        );
13863                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
13864                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13865                        Ok(NodeRequest::SetWeakOk { payload: req, control_handle })
13866                    }
13867                    0x3f22f2a293d3cdac => {
13868                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13869                        let mut req = fidl::new_empty!(
13870                            NodeAttachNodeTrackingRequest,
13871                            fidl::encoding::DefaultFuchsiaResourceDialect
13872                        );
13873                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
13874                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13875                        Ok(NodeRequest::AttachNodeTracking { payload: req, control_handle })
13876                    }
13877                    _ if header.tx_id == 0
13878                        && header
13879                            .dynamic_flags()
13880                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
13881                    {
13882                        Ok(NodeRequest::_UnknownMethod {
13883                            ordinal: header.ordinal,
13884                            control_handle: NodeControlHandle { inner: this.inner.clone() },
13885                            method_type: fidl::MethodType::OneWay,
13886                        })
13887                    }
13888                    _ if header
13889                        .dynamic_flags()
13890                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
13891                    {
13892                        this.inner.send_framework_err(
13893                            fidl::encoding::FrameworkErr::UnknownMethod,
13894                            header.tx_id,
13895                            header.ordinal,
13896                            header.dynamic_flags(),
13897                            (bytes, handles),
13898                        )?;
13899                        Ok(NodeRequest::_UnknownMethod {
13900                            ordinal: header.ordinal,
13901                            control_handle: NodeControlHandle { inner: this.inner.clone() },
13902                            method_type: fidl::MethodType::TwoWay,
13903                        })
13904                    }
13905                    _ => Err(fidl::Error::UnknownOrdinal {
13906                        ordinal: header.ordinal,
13907                        protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
13908                    }),
13909                }))
13910            },
13911        )
13912    }
13913}
13914
13915/// This protocol is the parent protocol for all nodes in the tree established
13916/// by [`fuchsia.sysmem2/BufferCollectionToken`] creation and
13917/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] creation, including
13918/// [`fuchsia.sysmem2/BufferCollectionToken`](s) which have since been converted
13919/// to a [`fuchsia.sysmem2/BufferCollection`] channel.
13920///
13921/// Epitaphs are not used in this protocol.
13922#[derive(Debug)]
13923pub enum NodeRequest {
13924    /// Ensure that previous messages have been received server side. This is
13925    /// particularly useful after previous messages that created new tokens,
13926    /// because a token must be known to the sysmem server before sending the
13927    /// token to another participant.
13928    ///
13929    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
13930    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
13931    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
13932    /// to mitigate the possibility of a hostile/fake
13933    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
13934    /// Another way is to pass the token to
13935    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
13936    /// the token as part of exchanging it for a
13937    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
13938    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
13939    /// of stalling.
13940    ///
13941    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
13942    /// and then starting and completing a `Sync`, it's then safe to send the
13943    /// `BufferCollectionToken` client ends to other participants knowing the
13944    /// server will recognize the tokens when they're sent by the other
13945    /// participants to sysmem in a
13946    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
13947    /// efficient way to create tokens while avoiding unnecessary round trips.
13948    ///
13949    /// Other options include waiting for each
13950    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
13951    /// individually (using separate call to `Sync` after each), or calling
13952    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
13953    /// converted to a `BufferCollection` via
13954    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
13955    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
13956    /// the sync step and can create multiple tokens at once.
13957    Sync { responder: NodeSyncResponder },
13958    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
13959    ///
13960    /// Normally a participant will convert a `BufferCollectionToken` into a
13961    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
13962    /// `Release` via the token (and then close the channel immediately or
13963    /// shortly later in response to server closing the server end), which
13964    /// avoids causing buffer collection failure. Without a prior `Release`,
13965    /// closing the `BufferCollectionToken` client end will cause buffer
13966    /// collection failure.
13967    ///
13968    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
13969    ///
13970    /// By default the server handles unexpected closure of a
13971    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
13972    /// first) by failing the buffer collection. Partly this is to expedite
13973    /// closing VMO handles to reclaim memory when any participant fails. If a
13974    /// participant would like to cleanly close a `BufferCollection` without
13975    /// causing buffer collection failure, the participant can send `Release`
13976    /// before closing the `BufferCollection` client end. The `Release` can
13977    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
13978    /// buffer collection won't require constraints from this node in order to
13979    /// allocate. If after `SetConstraints`, the constraints are retained and
13980    /// aggregated, despite the lack of `BufferCollection` connection at the
13981    /// time of constraints aggregation.
13982    ///
13983    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
13984    ///
13985    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
13986    /// end (without `Release` first) will trigger failure of the buffer
13987    /// collection. To close a `BufferCollectionTokenGroup` channel without
13988    /// failing the buffer collection, ensure that AllChildrenPresent() has been
13989    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
13990    /// client end.
13991    ///
13992    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
13994    /// buffer collection will fail (triggered by reception of `Release` without
13995    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
13996    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
13997    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
13998    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
13999    /// close requires `AllChildrenPresent` (if not already sent), then
14000    /// `Release`, then close client end.
14001    ///
14002    /// If `Release` occurs after `AllChildrenPresent`, the children and all
14003    /// their constraints remain intact (just as they would if the
14004    /// `BufferCollectionTokenGroup` channel had remained open), and the client
14005    /// end close doesn't trigger buffer collection failure.
14006    ///
14007    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
14008    ///
14009    /// For brevity, the per-channel-protocol paragraphs above ignore the
14010    /// separate failure domain created by
14011    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
14012    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
14013    /// unexpectedly closes (without `Release` first) and that client end is
14014    /// under a failure domain, instead of failing the whole buffer collection,
14015    /// the failure domain is failed, but the buffer collection itself is
14016    /// isolated from failure of the failure domain. Such failure domains can be
14017    /// nested, in which case only the inner-most failure domain in which the
14018    /// `Node` resides fails.
14019    Release { control_handle: NodeControlHandle },
14020    /// Set a name for VMOs in this buffer collection.
14021    ///
14022    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
14023    /// will be truncated to fit. The name of the vmo will be suffixed with the
14024    /// buffer index within the collection (if the suffix fits within
14025    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
14026    /// listed in the inspect data.
14027    ///
14028    /// The name only affects VMOs allocated after the name is set; this call
14029    /// does not rename existing VMOs. If multiple clients set different names
14030    /// then the larger priority value will win. Setting a new name with the
14031    /// same priority as a prior name doesn't change the name.
14032    ///
14033    /// All table fields are currently required.
14034    ///
14035    /// + request `priority` The name is only set if this is the first `SetName`
14036    ///   or if `priority` is greater than any previous `priority` value in
14037    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
14038    /// + request `name` The name for VMOs created under this buffer collection.
14039    SetName { payload: NodeSetNameRequest, control_handle: NodeControlHandle },
14040    /// Set information about the current client that can be used by sysmem to
14041    /// help diagnose leaking memory and allocation stalls waiting for a
14042    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
14043    ///
14044    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
14046    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
14047    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
14048    ///
14049    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
14050    /// `Allocator` is the most efficient way to ensure that all
14051    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
14052    /// set, and is also more efficient than separately sending the same debug
14053    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
14054    /// created [`fuchsia.sysmem2/Node`].
14055    ///
14056    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
14057    /// indicate which client is closing their channel first, leading to subtree
14058    /// failure (which can be normal if the purpose of the subtree is over, but
14059    /// if happening earlier than expected, the client-channel-specific name can
14060    /// help diagnose where the failure is first coming from, from sysmem's
14061    /// point of view).
14062    ///
14063    /// All table fields are currently required.
14064    ///
14065    /// + request `name` This can be an arbitrary string, but the current
14066    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
14067    /// + request `id` This can be an arbitrary id, but the current process ID
14068    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
14069    SetDebugClientInfo { payload: NodeSetDebugClientInfoRequest, control_handle: NodeControlHandle },
14070    /// Sysmem logs a warning if sysmem hasn't seen
14071    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
14072    /// within 5 seconds after creation of a new collection.
14073    ///
14074    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
14076    /// take effect.
14077    ///
14078    /// In most cases the default works well.
14079    ///
14080    /// All table fields are currently required.
14081    ///
14082    /// + request `deadline` The time at which sysmem will start trying to log
14083    ///   the warning, unless all constraints are with sysmem by then.
14084    SetDebugTimeoutLogDeadline {
14085        payload: NodeSetDebugTimeoutLogDeadlineRequest,
14086        control_handle: NodeControlHandle,
14087    },
14088    /// This enables verbose logging for the buffer collection.
14089    ///
14090    /// Verbose logging includes constraints set via
14091    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
14092    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
14093    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
14094    /// the tree of `Node`(s).
14095    ///
14096    /// Normally sysmem prints only a single line complaint when aggregation
14097    /// fails, with just the specific detailed reason that aggregation failed,
14098    /// with little surrounding context.  While this is often enough to diagnose
14099    /// a problem if only a small change was made and everything was working
14100    /// before the small change, it's often not particularly helpful for getting
14101    /// a new buffer collection to work for the first time.  Especially with
14102    /// more complex trees of nodes, involving things like
14103    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
14104    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
14105    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
14106    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
14107    /// looks like and why it's failing a logical allocation, or why a tree or
14108    /// subtree is failing sooner than expected.
14109    ///
14110    /// The intent of the extra logging is to be acceptable from a performance
14111    /// point of view, under the assumption that verbose logging is only enabled
14112    /// on a low number of buffer collections. If we're not tracking down a bug,
14113    /// we shouldn't send this message.
14114    SetVerboseLogging { control_handle: NodeControlHandle },
14115    /// This gets a handle that can be used as a parameter to
14116    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
14117    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
14118    /// client obtained this handle from this `Node`.
14119    ///
14120    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
14121    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
14122    /// despite the two calls typically being on different channels.
14123    ///
14124    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
14125    ///
14126    /// All table fields are currently required.
14127    ///
14128    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
14129    ///   different `Node` channel, to prove that the client obtained the handle
14130    ///   from this `Node`.
14131    GetNodeRef { responder: NodeGetNodeRefResponder },
14132    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
14133    /// rooted at a different child token of a common parent
14134    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
14135    /// passed-in `node_ref`.
14136    ///
14137    /// This call is for assisting with admission control de-duplication, and
14138    /// with debugging.
14139    ///
14140    /// The `node_ref` must be obtained using
14141    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
14142    ///
14143    /// The `node_ref` can be a duplicated handle; it's not necessary to call
14144    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
14145    ///
14146    /// If a calling token may not actually be a valid token at all due to a
14147    /// potentially hostile/untrusted provider of the token, call
14148    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
14149    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
14150    /// never responds due to a calling token not being a real token (not really
14151    /// talking to sysmem).  Another option is to call
14152    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
14153    /// which also validates the token along with converting it to a
14154    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
14155    ///
14156    /// All table fields are currently required.
14157    ///
14158    /// - response `is_alternate`
14159    ///   - true: The first parent node in common between the calling node and
14160    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
14161    ///     that the calling `Node` and the `node_ref` `Node` will not have both
14162    ///     their constraints apply - rather sysmem will choose one or the other
14163    ///     of the constraints - never both.  This is because only one child of
14164    ///     a `BufferCollectionTokenGroup` is selected during logical
14165    ///     allocation, with only that one child's subtree contributing to
14166    ///     constraints aggregation.
14167    ///   - false: The first parent node in common between the calling `Node`
14168    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
14169    ///     Currently, this means the first parent node in common is a
14170    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
14171    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
14172    ///     `Node` may have both their constraints apply during constraints
14173    ///     aggregation of the logical allocation, if both `Node`(s) are
14174    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
14175    ///     this case, there is no `BufferCollectionTokenGroup` that will
14176    ///     directly prevent the two `Node`(s) from both being selected and
14177    ///     their constraints both aggregated, but even when false, one or both
14178    ///     `Node`(s) may still be eliminated from consideration if one or both
14179    ///     `Node`(s) has a direct or indirect parent
14180    ///     `BufferCollectionTokenGroup` which selects a child subtree other
14181    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
14182    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
14183    ///   associated with the same buffer collection as the calling `Node`.
14184    ///   Another reason for this error is if the `node_ref` is an
14185    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
14186    ///   a real `node_ref` obtained from `GetNodeRef`.
14187    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
14189    ///   the needed rights expected on a real `node_ref`.
14190    /// * No other failing status codes are returned by this call.  However,
14191    ///   sysmem may add additional codes in future, so the client should have
14192    ///   sensible default handling for any failing status code.
14193    IsAlternateFor { payload: NodeIsAlternateForRequest, responder: NodeIsAlternateForResponder },
14194    /// Get the buffer collection ID. This ID is also available from
14195    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
14196    /// within the collection).
14197    ///
14198    /// This call is mainly useful in situations where we can't convey a
14199    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
14200    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
14201    /// handle, which can be joined back up with a `BufferCollection` client end
14202    /// that was created via a different path. Prefer to convey a
14203    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
14204    ///
14205    /// Trusting a `buffer_collection_id` value from a source other than sysmem
14206    /// is analogous to trusting a koid value from a source other than zircon.
14207    /// Both should be avoided unless really necessary, and both require
14208    /// caution. In some situations it may be reasonable to refer to a
14209    /// pre-established `BufferCollection` by `buffer_collection_id` via a
14210    /// protocol for efficiency reasons, but an incoming value purporting to be
14211    /// a `buffer_collection_id` is not sufficient alone to justify granting the
14212    /// sender of the `buffer_collection_id` any capability. The sender must
14213    /// first prove to a receiver that the sender has/had a VMO or has/had a
14214    /// `BufferCollectionToken` to the same collection by sending a handle that
14215    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
14216    /// `buffer_collection_id` value. The receiver should take care to avoid
14217    /// assuming that a sender had a `BufferCollectionToken` in cases where the
14218    /// sender has only proven that the sender had a VMO.
14219    ///
14220    /// - response `buffer_collection_id` This ID is unique per buffer
14221    ///   collection per boot. Each buffer is uniquely identified by the
14222    ///   `buffer_collection_id` and `buffer_index` together.
14223    GetBufferCollectionId { responder: NodeGetBufferCollectionIdResponder },
14224    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
14225    /// created after this message to weak, which means that a client's `Node`
14226    /// client end (or a child created after this message) is not alone
14227    /// sufficient to keep allocated VMOs alive.
14228    ///
14229    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
14230    /// `close_weak_asap`.
14231    ///
14232    /// This message is only permitted before the `Node` becomes ready for
14233    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
14234    ///   * `BufferCollectionToken`: any time
14235    ///   * `BufferCollection`: before `SetConstraints`
14236    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
14237    ///
14238    /// Currently, no conversion from strong `Node` to weak `Node` after ready
14239    /// for allocation is provided, but a client can simulate that by creating
14240    /// an additional `Node` before allocation and setting that additional
14241    /// `Node` to weak, and then potentially at some point later sending
14242    /// `Release` and closing the client end of the client's strong `Node`, but
14243    /// keeping the client's weak `Node`.
14244    ///
14245    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
14246    /// collection failure (all `Node` client end(s) will see
14247    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
14248    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
14249    /// this situation until all `Node`(s) are ready for allocation. For initial
14250    /// allocation to succeed, at least one strong `Node` is required to exist
14251    /// at allocation time, but after that client receives VMO handles, that
14252    /// client can `BufferCollection.Release` and close the client end without
14253    /// causing this type of failure.
14254    ///
14255    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
14256    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
14257    /// separately as appropriate.
14258    SetWeak { control_handle: NodeControlHandle },
14259    /// This indicates to sysmem that the client is prepared to pay attention to
14260    /// `close_weak_asap`.
14261    ///
14262    /// If sent, this message must be before
14263    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
14264    ///
14265    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
14266    /// send this message before `WaitForAllBuffersAllocated`, or a parent
14267    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
14268    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
14269    /// trigger buffer collection failure.
14270    ///
14271    /// This message is necessary because weak sysmem VMOs have not always been
14272    /// a thing, so older clients are not aware of the need to pay attention to
14273    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
14274    /// sysmem weak VMO handles asap. By having this message and requiring
14275    /// participants to indicate their acceptance of this aspect of the overall
14276    /// protocol, we avoid situations where an older client is delivered a weak
14277    /// VMO without any way for sysmem to get that VMO to close quickly later
14278    /// (and on a per-buffer basis).
14279    ///
14280    /// A participant that doesn't handle `close_weak_asap` and also doesn't
14281    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
14282    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
14283    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
14284    /// same participant has a child/delegate which does retrieve VMOs, that
14285    /// child/delegate will need to send `SetWeakOk` before
14286    /// `WaitForAllBuffersAllocated`.
14287    ///
14288    /// + request `for_child_nodes_also` If present and true, this means direct
14289    ///   child nodes of this node created after this message plus all
14290    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
14291    ///   those nodes. Any child node of this node that was created before this
14292    ///   message is not included. This setting is "sticky" in the sense that a
14293    ///   subsequent `SetWeakOk` without this bool set to true does not reset
14294    ///   the server-side bool. If this creates a problem for a participant, a
14295    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
14296    ///   tokens instead, as appropriate. A participant should only set
14297    ///   `for_child_nodes_also` true if the participant can really promise to
14298    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
14299    ///   weak VMO handles held by participants holding the corresponding child
14300    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
14301    ///   which are using sysmem(1) can be weak, despite the clients of those
14302    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
14303    ///   direct way to find out about `close_weak_asap`. This only applies to
14304    ///   descendents of this `Node` which are using sysmem(1), not to this
14305    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
14306    ///   token, which will fail allocation unless an ancestor of this `Node`
14307    ///   specified `for_child_nodes_also` true.
14308    SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: NodeControlHandle },
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
14311    /// reservation by a different `Node` via
14312    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
14313    ///
14314    /// The `Node` buffer counts may not be released until the entire tree of
14315    /// `Node`(s) is closed or failed, because
14316    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
14317    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
14318    /// `Node` buffer counts remain reserved until the orphaned node is later
14319    /// cleaned up.
14320    ///
14321    /// If the `Node` exceeds a fairly large number of attached eventpair server
14322    /// ends, a log message will indicate this and the `Node` (and the
14323    /// appropriate) sub-tree will fail.
14324    ///
14325    /// The `server_end` will remain open when
14326    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
14327    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
14328    /// [`fuchsia.sysmem2/BufferCollection`].
14329    ///
14330    /// This message can also be used with a
14331    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
14332    AttachNodeTracking { payload: NodeAttachNodeTrackingRequest, control_handle: NodeControlHandle },
14333    /// An interaction was received which does not match any known method.
14334    #[non_exhaustive]
14335    _UnknownMethod {
14336        /// Ordinal of the method that was called.
14337        ordinal: u64,
14338        control_handle: NodeControlHandle,
14339        method_type: fidl::MethodType,
14340    },
14341}
14342
14343impl NodeRequest {
14344    #[allow(irrefutable_let_patterns)]
14345    pub fn into_sync(self) -> Option<(NodeSyncResponder)> {
14346        if let NodeRequest::Sync { responder } = self { Some((responder)) } else { None }
14347    }
14348
14349    #[allow(irrefutable_let_patterns)]
14350    pub fn into_release(self) -> Option<(NodeControlHandle)> {
14351        if let NodeRequest::Release { control_handle } = self {
14352            Some((control_handle))
14353        } else {
14354            None
14355        }
14356    }
14357
14358    #[allow(irrefutable_let_patterns)]
14359    pub fn into_set_name(self) -> Option<(NodeSetNameRequest, NodeControlHandle)> {
14360        if let NodeRequest::SetName { payload, control_handle } = self {
14361            Some((payload, control_handle))
14362        } else {
14363            None
14364        }
14365    }
14366
14367    #[allow(irrefutable_let_patterns)]
14368    pub fn into_set_debug_client_info(
14369        self,
14370    ) -> Option<(NodeSetDebugClientInfoRequest, NodeControlHandle)> {
14371        if let NodeRequest::SetDebugClientInfo { payload, control_handle } = self {
14372            Some((payload, control_handle))
14373        } else {
14374            None
14375        }
14376    }
14377
14378    #[allow(irrefutable_let_patterns)]
14379    pub fn into_set_debug_timeout_log_deadline(
14380        self,
14381    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, NodeControlHandle)> {
14382        if let NodeRequest::SetDebugTimeoutLogDeadline { payload, control_handle } = self {
14383            Some((payload, control_handle))
14384        } else {
14385            None
14386        }
14387    }
14388
14389    #[allow(irrefutable_let_patterns)]
14390    pub fn into_set_verbose_logging(self) -> Option<(NodeControlHandle)> {
14391        if let NodeRequest::SetVerboseLogging { control_handle } = self {
14392            Some((control_handle))
14393        } else {
14394            None
14395        }
14396    }
14397
14398    #[allow(irrefutable_let_patterns)]
14399    pub fn into_get_node_ref(self) -> Option<(NodeGetNodeRefResponder)> {
14400        if let NodeRequest::GetNodeRef { responder } = self { Some((responder)) } else { None }
14401    }
14402
14403    #[allow(irrefutable_let_patterns)]
14404    pub fn into_is_alternate_for(
14405        self,
14406    ) -> Option<(NodeIsAlternateForRequest, NodeIsAlternateForResponder)> {
14407        if let NodeRequest::IsAlternateFor { payload, responder } = self {
14408            Some((payload, responder))
14409        } else {
14410            None
14411        }
14412    }
14413
14414    #[allow(irrefutable_let_patterns)]
14415    pub fn into_get_buffer_collection_id(self) -> Option<(NodeGetBufferCollectionIdResponder)> {
14416        if let NodeRequest::GetBufferCollectionId { responder } = self {
14417            Some((responder))
14418        } else {
14419            None
14420        }
14421    }
14422
14423    #[allow(irrefutable_let_patterns)]
14424    pub fn into_set_weak(self) -> Option<(NodeControlHandle)> {
14425        if let NodeRequest::SetWeak { control_handle } = self {
14426            Some((control_handle))
14427        } else {
14428            None
14429        }
14430    }
14431
14432    #[allow(irrefutable_let_patterns)]
14433    pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, NodeControlHandle)> {
14434        if let NodeRequest::SetWeakOk { payload, control_handle } = self {
14435            Some((payload, control_handle))
14436        } else {
14437            None
14438        }
14439    }
14440
14441    #[allow(irrefutable_let_patterns)]
14442    pub fn into_attach_node_tracking(
14443        self,
14444    ) -> Option<(NodeAttachNodeTrackingRequest, NodeControlHandle)> {
14445        if let NodeRequest::AttachNodeTracking { payload, control_handle } = self {
14446            Some((payload, control_handle))
14447        } else {
14448            None
14449        }
14450    }
14451
14452    /// Name of the method defined in FIDL
14453    pub fn method_name(&self) -> &'static str {
14454        match *self {
14455            NodeRequest::Sync { .. } => "sync",
14456            NodeRequest::Release { .. } => "release",
14457            NodeRequest::SetName { .. } => "set_name",
14458            NodeRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
14459            NodeRequest::SetDebugTimeoutLogDeadline { .. } => "set_debug_timeout_log_deadline",
14460            NodeRequest::SetVerboseLogging { .. } => "set_verbose_logging",
14461            NodeRequest::GetNodeRef { .. } => "get_node_ref",
14462            NodeRequest::IsAlternateFor { .. } => "is_alternate_for",
14463            NodeRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
14464            NodeRequest::SetWeak { .. } => "set_weak",
14465            NodeRequest::SetWeakOk { .. } => "set_weak_ok",
14466            NodeRequest::AttachNodeTracking { .. } => "attach_node_tracking",
14467            NodeRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
14468                "unknown one-way method"
14469            }
14470            NodeRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
14471                "unknown two-way method"
14472            }
14473        }
14474    }
14475}
14476
/// Server-side control handle for the `Node` protocol. Cloning is cheap:
/// every clone shares the same underlying `ServeInner` state via the `Arc`.
#[derive(Debug, Clone)]
pub struct NodeControlHandle {
    // Shared serving state (channel plus bookkeeping) for this connection.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
14481
/// Every operation delegates to the shared `fidl::ServeInner` state.
impl fidl::endpoints::ControlHandle for NodeControlHandle {
    // Closes the channel (no epitaph; see `shutdown_with_epitaph`).
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    // Closes the channel, delivering `status` as the epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    // Reports whether the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    // Returns a signal handle for waiting on channel closure.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Clears/sets user signals on the peer end of the channel
    // (only available on Fuchsia targets).
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
14508
// Intentionally empty: no inherent methods are generated for this control handle.
impl NodeControlHandle {}
14510
/// Responder for the `Node.Sync` two-way method. Must be used to send a
/// response (see the `#[must_use]` below); dropping it unsent shuts the
/// channel down via `Drop`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeSyncResponder {
    // Wrapped in `ManuallyDrop` so `Drop` and `drop_without_shutdown` can each
    // release it exactly once along their respective paths.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id echoed back in the response message.
    tx_id: u32,
}
14517
/// Sets the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14528
impl fidl::endpoints::Responder for NodeSyncResponder {
    type ControlHandle = NodeControlHandle;

    // Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    // Consumes the responder without triggering the channel shutdown that
    // `Drop` would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14543
14544impl NodeSyncResponder {
14545    /// Sends a response to the FIDL transaction.
14546    ///
14547    /// Sets the channel to shutdown if an error occurs.
14548    pub fn send(self) -> Result<(), fidl::Error> {
14549        let _result = self.send_raw();
14550        if _result.is_err() {
14551            self.control_handle.shutdown();
14552        }
14553        self.drop_without_shutdown();
14554        _result
14555    }
14556
14557    /// Similar to "send" but does not shutdown the channel if an error occurs.
14558    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
14559        let _result = self.send_raw();
14560        self.drop_without_shutdown();
14561        _result
14562    }
14563
14564    fn send_raw(&self) -> Result<(), fidl::Error> {
14565        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
14566            fidl::encoding::Flexible::new(()),
14567            self.tx_id,
14568            0x11ac2555cf575b54,
14569            fidl::encoding::DynamicFlags::FLEXIBLE,
14570        )
14571    }
14572}
14573
/// Responder for the `Node.GetNodeRef` two-way method. Must be used to send a
/// response (see the `#[must_use]` below); dropping it unsent shuts the
/// channel down via `Drop`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeGetNodeRefResponder {
    // Wrapped in `ManuallyDrop` so `Drop` and `drop_without_shutdown` can each
    // release it exactly once along their respective paths.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id echoed back in the response message.
    tx_id: u32,
}
14580
/// Sets the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14591
impl fidl::endpoints::Responder for NodeGetNodeRefResponder {
    type ControlHandle = NodeControlHandle;

    // Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    // Consumes the responder without triggering the channel shutdown that
    // `Drop` would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14606
14607impl NodeGetNodeRefResponder {
14608    /// Sends a response to the FIDL transaction.
14609    ///
14610    /// Sets the channel to shutdown if an error occurs.
14611    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
14612        let _result = self.send_raw(payload);
14613        if _result.is_err() {
14614            self.control_handle.shutdown();
14615        }
14616        self.drop_without_shutdown();
14617        _result
14618    }
14619
14620    /// Similar to "send" but does not shutdown the channel if an error occurs.
14621    pub fn send_no_shutdown_on_err(
14622        self,
14623        mut payload: NodeGetNodeRefResponse,
14624    ) -> Result<(), fidl::Error> {
14625        let _result = self.send_raw(payload);
14626        self.drop_without_shutdown();
14627        _result
14628    }
14629
14630    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
14631        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
14632            fidl::encoding::Flexible::new(&mut payload),
14633            self.tx_id,
14634            0x5b3d0e51614df053,
14635            fidl::encoding::DynamicFlags::FLEXIBLE,
14636        )
14637    }
14638}
14639
/// Responder for the `Node.IsAlternateFor` two-way method. Must be used to
/// send a response (see the `#[must_use]` below); dropping it unsent shuts
/// the channel down via `Drop`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeIsAlternateForResponder {
    // Wrapped in `ManuallyDrop` so `Drop` and `drop_without_shutdown` can each
    // release it exactly once along their respective paths.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id echoed back in the response message.
    tx_id: u32,
}
14646
/// Sets the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14657
impl fidl::endpoints::Responder for NodeIsAlternateForResponder {
    type ControlHandle = NodeControlHandle;

    // Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    // Consumes the responder without triggering the channel shutdown that
    // `Drop` would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14672
14673impl NodeIsAlternateForResponder {
14674    /// Sends a response to the FIDL transaction.
14675    ///
14676    /// Sets the channel to shutdown if an error occurs.
14677    pub fn send(
14678        self,
14679        mut result: Result<&NodeIsAlternateForResponse, Error>,
14680    ) -> Result<(), fidl::Error> {
14681        let _result = self.send_raw(result);
14682        if _result.is_err() {
14683            self.control_handle.shutdown();
14684        }
14685        self.drop_without_shutdown();
14686        _result
14687    }
14688
14689    /// Similar to "send" but does not shutdown the channel if an error occurs.
14690    pub fn send_no_shutdown_on_err(
14691        self,
14692        mut result: Result<&NodeIsAlternateForResponse, Error>,
14693    ) -> Result<(), fidl::Error> {
14694        let _result = self.send_raw(result);
14695        self.drop_without_shutdown();
14696        _result
14697    }
14698
14699    fn send_raw(
14700        &self,
14701        mut result: Result<&NodeIsAlternateForResponse, Error>,
14702    ) -> Result<(), fidl::Error> {
14703        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
14704            NodeIsAlternateForResponse,
14705            Error,
14706        >>(
14707            fidl::encoding::FlexibleResult::new(result),
14708            self.tx_id,
14709            0x3a58e00157e0825,
14710            fidl::encoding::DynamicFlags::FLEXIBLE,
14711        )
14712    }
14713}
14714
/// Responder for the `Node.GetBufferCollectionId` two-way method. Must be
/// used to send a response (see the `#[must_use]` below); dropping it unsent
/// shuts the channel down via `Drop`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeGetBufferCollectionIdResponder {
    // Wrapped in `ManuallyDrop` so `Drop` and `drop_without_shutdown` can each
    // release it exactly once along their respective paths.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id echoed back in the response message.
    tx_id: u32,
}
14721
/// Sets the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14732
impl fidl::endpoints::Responder for NodeGetBufferCollectionIdResponder {
    type ControlHandle = NodeControlHandle;

    // Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    // Consumes the responder without triggering the channel shutdown that
    // `Drop` would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14747
14748impl NodeGetBufferCollectionIdResponder {
14749    /// Sends a response to the FIDL transaction.
14750    ///
14751    /// Sets the channel to shutdown if an error occurs.
14752    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
14753        let _result = self.send_raw(payload);
14754        if _result.is_err() {
14755            self.control_handle.shutdown();
14756        }
14757        self.drop_without_shutdown();
14758        _result
14759    }
14760
14761    /// Similar to "send" but does not shutdown the channel if an error occurs.
14762    pub fn send_no_shutdown_on_err(
14763        self,
14764        mut payload: &NodeGetBufferCollectionIdResponse,
14765    ) -> Result<(), fidl::Error> {
14766        let _result = self.send_raw(payload);
14767        self.drop_without_shutdown();
14768        _result
14769    }
14770
14771    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
14772        self.control_handle
14773            .inner
14774            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
14775                fidl::encoding::Flexible::new(payload),
14776                self.tx_id,
14777                0x77d19a494b78ba8c,
14778                fidl::encoding::DynamicFlags::FLEXIBLE,
14779            )
14780    }
14781}
14782
/// Zero-sized marker type for the `SecureMem` protocol; ties the proxy,
/// request-stream, and synchronous-proxy types together at the type level.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct SecureMemMarker;
14785
impl fidl::endpoints::ProtocolMarker for SecureMemMarker {
    type Proxy = SecureMemProxy;
    type RequestStream = SecureMemRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = SecureMemSynchronousProxy;

    // Name used for this protocol in debug/error output.
    const DEBUG_NAME: &'static str = "(anonymous) SecureMem";
}
/// Result of `SecureMem.GetPhysicalSecureHeaps`.
pub type SecureMemGetPhysicalSecureHeapsResult =
    Result<SecureMemGetPhysicalSecureHeapsResponse, Error>;
/// Result of `SecureMem.GetDynamicSecureHeaps`.
pub type SecureMemGetDynamicSecureHeapsResult =
    Result<SecureMemGetDynamicSecureHeapsResponse, Error>;
/// Result of `SecureMem.GetPhysicalSecureHeapProperties`.
pub type SecureMemGetPhysicalSecureHeapPropertiesResult =
    Result<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>;
/// Result of `SecureMem.AddSecureHeapPhysicalRange`; success carries no data.
pub type SecureMemAddSecureHeapPhysicalRangeResult = Result<(), Error>;
/// Result of `SecureMem.DeleteSecureHeapPhysicalRange`; success carries no data.
pub type SecureMemDeleteSecureHeapPhysicalRangeResult = Result<(), Error>;
/// Result of `SecureMem.ModifySecureHeapPhysicalRange`; success carries no data.
pub type SecureMemModifySecureHeapPhysicalRangeResult = Result<(), Error>;
/// Result of `SecureMem.ZeroSubRange`; success carries no data.
pub type SecureMemZeroSubRangeResult = Result<(), Error>;
14804
/// Client-side interface of the `SecureMem` protocol: each two-way FIDL
/// method appears as an `r#`-named function paired with an associated
/// `Future` type for its response.
pub trait SecureMemProxyInterface: Send + Sync {
    type GetPhysicalSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error>>
        + Send;
    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut;
    type GetDynamicSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error>>
        + Send;
    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut;
    type GetPhysicalSecureHeapPropertiesResponseFut: std::future::Future<
            Output = Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error>,
        > + Send;
    fn r#get_physical_secure_heap_properties(
        &self,
        payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut;
    type AddSecureHeapPhysicalRangeResponseFut: std::future::Future<Output = Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error>>
        + Send;
    fn r#add_secure_heap_physical_range(
        &self,
        payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> Self::AddSecureHeapPhysicalRangeResponseFut;
    type DeleteSecureHeapPhysicalRangeResponseFut: std::future::Future<
            Output = Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error>,
        > + Send;
    fn r#delete_secure_heap_physical_range(
        &self,
        payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut;
    type ModifySecureHeapPhysicalRangeResponseFut: std::future::Future<
            Output = Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error>,
        > + Send;
    fn r#modify_secure_heap_physical_range(
        &self,
        payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut;
    type ZeroSubRangeResponseFut: std::future::Future<Output = Result<SecureMemZeroSubRangeResult, fidl::Error>>
        + Send;
    fn r#zero_sub_range(
        &self,
        payload: &SecureMemZeroSubRangeRequest,
    ) -> Self::ZeroSubRangeResponseFut;
}
/// Synchronous (blocking) client for the `SecureMem` protocol.
/// Only compiled for Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct SecureMemSynchronousProxy {
    // Blocking FIDL client bound to the `SecureMem` channel.
    client: fidl::client::sync::Client,
}
14851
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for SecureMemSynchronousProxy {
    type Proxy = SecureMemProxy;
    type Protocol = SecureMemMarker;

    // Wraps a raw channel in a synchronous proxy.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    // Recovers the underlying channel, consuming the proxy.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    // Borrows the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
14869
14870#[cfg(target_os = "fuchsia")]
14871impl SecureMemSynchronousProxy {
14872    pub fn new(channel: fidl::Channel) -> Self {
14873        let protocol_name = <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
14874        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
14875    }
14876
14877    pub fn into_channel(self) -> fidl::Channel {
14878        self.client.into_channel()
14879    }
14880
14881    /// Waits until an event arrives and returns it. It is safe for other
14882    /// threads to make concurrent requests while waiting for an event.
14883    pub fn wait_for_event(
14884        &self,
14885        deadline: zx::MonotonicInstant,
14886    ) -> Result<SecureMemEvent, fidl::Error> {
14887        SecureMemEvent::decode(self.client.wait_for_event(deadline)?)
14888    }
14889
14890    /// Gets the physical address and length of any secure heap whose physical
14891    /// range is configured via the TEE.
14892    ///
14893    /// Presently, these will be fixed physical addresses and lengths, with the
14894    /// location plumbed via the TEE.
14895    ///
14896    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
14897    /// when there isn't any special heap-specific per-VMO setup or teardown
14898    /// required.
14899    ///
14900    /// The physical range must be secured/protected by the TEE before the
14901    /// securemem driver responds to this request with success.
14902    ///
14903    /// Sysmem should only call this once.  Returning zero heaps is not a
14904    /// failure.
14905    ///
14906    /// Errors:
14907    ///  * PROTOCOL_DEVIATION - called more than once.
14908    ///  * UNSPECIFIED - generic internal error (such as in communication
14909    ///    with TEE which doesn't generate zx_status_t errors).
14910    ///  * other errors are allowed; any other errors should be treated the same
14911    ///    as UNSPECIFIED.
14912    pub fn r#get_physical_secure_heaps(
14913        &self,
14914        ___deadline: zx::MonotonicInstant,
14915    ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
14916        let _response = self.client.send_query::<
14917            fidl::encoding::EmptyPayload,
14918            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
14919        >(
14920            (),
14921            0x38716300592073e3,
14922            fidl::encoding::DynamicFlags::FLEXIBLE,
14923            ___deadline,
14924        )?
14925        .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
14926        Ok(_response.map(|x| x))
14927    }
14928
14929    /// Gets information about any secure heaps whose physical pages are not
14930    /// configured by the TEE, but by sysmem.
14931    ///
14932    /// Sysmem should only call this once. Returning zero heaps is not a
14933    /// failure.
14934    ///
14935    /// Errors:
14936    ///  * PROTOCOL_DEVIATION - called more than once.
14937    ///  * UNSPECIFIED - generic internal error (such as in communication
14938    ///    with TEE which doesn't generate zx_status_t errors).
14939    ///  * other errors are allowed; any other errors should be treated the same
14940    ///    as UNSPECIFIED.
14941    pub fn r#get_dynamic_secure_heaps(
14942        &self,
14943        ___deadline: zx::MonotonicInstant,
14944    ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
14945        let _response = self.client.send_query::<
14946            fidl::encoding::EmptyPayload,
14947            fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
14948        >(
14949            (),
14950            0x1190847f99952834,
14951            fidl::encoding::DynamicFlags::FLEXIBLE,
14952            ___deadline,
14953        )?
14954        .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
14955        Ok(_response.map(|x| x))
14956    }
14957
14958    /// This request from sysmem to the securemem driver gets the properties of
14959    /// a protected/secure heap.
14960    ///
14961    /// This only handles heaps with a single contiguous physical extent.
14962    ///
14963    /// The heap's entire physical range is indicated in case this request needs
14964    /// some physical space to auto-detect how many ranges are REE-usable.  Any
14965    /// temporary HW protection ranges will be deleted before this request
14966    /// completes.
14967    ///
14968    /// Errors:
14969    ///  * UNSPECIFIED - generic internal error (such as in communication
14970    ///    with TEE which doesn't generate zx_status_t errors).
14971    ///  * other errors are allowed; any other errors should be treated the same
14972    ///    as UNSPECIFIED.
14973    pub fn r#get_physical_secure_heap_properties(
14974        &self,
14975        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
14976        ___deadline: zx::MonotonicInstant,
14977    ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
14978        let _response = self.client.send_query::<
14979            SecureMemGetPhysicalSecureHeapPropertiesRequest,
14980            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
14981        >(
14982            payload,
14983            0xc6f06889009c7bc,
14984            fidl::encoding::DynamicFlags::FLEXIBLE,
14985            ___deadline,
14986        )?
14987        .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
14988        Ok(_response.map(|x| x))
14989    }
14990
14991    /// This request from sysmem to the securemem driver conveys a physical
14992    /// range to add, for a heap whose physical range(s) are set up via
14993    /// sysmem.
14994    ///
14995    /// Only sysmem can call this because only sysmem is handed the client end
14996    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
14997    /// securemem driver is the server end of this protocol.
14998    ///
14999    /// The securemem driver must configure all the covered offsets as protected
15000    /// before responding to this message with success.
15001    ///
15002    /// On failure, the securemem driver must ensure the protected range was not
15003    /// created.
15004    ///
15005    /// Sysmem must only call this up to once if dynamic_protection_ranges
15006    /// false.
15007    ///
15008    /// If dynamic_protection_ranges is true, sysmem can call this multiple
15009    /// times as long as the current number of ranges never exceeds
15010    /// max_protected_range_count.
15011    ///
15012    /// The caller must not attempt to add a range that matches an
15013    /// already-existing range.  Added ranges can overlap each other as long as
15014    /// no two ranges match exactly.
15015    ///
15016    /// Errors:
15017    ///   * PROTOCOL_DEVIATION - called more than once when
15018    ///     !dynamic_protection_ranges.  Adding a heap that would cause overall
15019    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
15020    ///     range that doesn't conform to protected_range_granularity. See log.
15021    ///   * UNSPECIFIED - generic internal error (such as in communication
15022    ///     with TEE which doesn't generate zx_status_t errors).
15023    ///   * other errors are possible, such as from communication failures or
15024    ///     server propagation of failures.
15025    pub fn r#add_secure_heap_physical_range(
15026        &self,
15027        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
15028        ___deadline: zx::MonotonicInstant,
15029    ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
15030        let _response = self.client.send_query::<
15031            SecureMemAddSecureHeapPhysicalRangeRequest,
15032            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15033        >(
15034            payload,
15035            0x35f695b9b6c7217a,
15036            fidl::encoding::DynamicFlags::FLEXIBLE,
15037            ___deadline,
15038        )?
15039        .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
15040        Ok(_response.map(|x| x))
15041    }
15042
15043    /// This request from sysmem to the securemem driver conveys a physical
15044    /// range to delete, for a heap whose physical range(s) are set up via
15045    /// sysmem.
15046    ///
15047    /// Only sysmem can call this because only sysmem is handed the client end
15048    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
15049    /// securemem driver is the server end of this protocol.
15050    ///
15051    /// The securemem driver must configure all the covered offsets as not
15052    /// protected before responding to this message with success.
15053    ///
15054    /// On failure, the securemem driver must ensure the protected range was not
15055    /// deleted.
15056    ///
15057    /// Sysmem must not call this if dynamic_protection_ranges false.
15058    ///
15059    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
15060    /// on various ranges that exist at the time of the call.
15061    ///
15062    /// If any portion of the range being deleted is not also covered by another
15063    /// protected range, then any ongoing DMA to any part of the entire range
15064    /// may be interrupted / may fail, potentially in a way that's disruptive to
15065    /// the entire system (bus lockup or similar, depending on device details).
15066    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
15067    /// any portion of the range being deleted, unless the caller has other
15068    /// active ranges covering every block of the range being deleted.  Ongoing
15069    /// DMA to/from blocks outside the range being deleted is never impacted by
15070    /// the deletion.
15071    ///
15072    /// Errors:
15073    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15074    ///     Unexpected heap, or range that doesn't conform to
15075    ///     protected_range_granularity.
15076    ///   * UNSPECIFIED - generic internal error (such as in communication
15077    ///     with TEE which doesn't generate zx_status_t errors).
15078    ///   * NOT_FOUND - the specified range is not found.
15079    ///   * other errors are possible, such as from communication failures or
15080    ///     server propagation of failures.
15081    pub fn r#delete_secure_heap_physical_range(
15082        &self,
15083        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
15084        ___deadline: zx::MonotonicInstant,
15085    ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
15086        let _response = self.client.send_query::<
15087            SecureMemDeleteSecureHeapPhysicalRangeRequest,
15088            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15089        >(
15090            payload,
15091            0xeaa58c650264c9e,
15092            fidl::encoding::DynamicFlags::FLEXIBLE,
15093            ___deadline,
15094        )?
15095        .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
15096        Ok(_response.map(|x| x))
15097    }
15098
15099    /// This request from sysmem to the securemem driver conveys a physical
15100    /// range to modify and its new base and length, for a heap whose physical
15101    /// range(s) are set up via sysmem.
15102    ///
15103    /// Only sysmem can call this because only sysmem is handed the client end
15104    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
15105    /// securemem driver is the server end of this protocol.
15106    ///
15107    /// The securemem driver must configure the range to cover only the new
15108    /// offsets before responding to this message with success.
15109    ///
15110    /// On failure, the securemem driver must ensure the range was not changed.
15111    ///
15112    /// Sysmem must not call this if dynamic_protection_ranges false.  Sysmem
15113    /// must not call this if !is_mod_protected_range_available.
15114    ///
15115    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
15116    /// on various ranges that exist at the time of the call.
15117    ///
15118    /// The range must only be modified at one end or the other, but not both.
15119    /// If the range is getting shorter, and the un-covered blocks are not
15120    /// covered by other active ranges, any ongoing DMA to the entire range
15121    /// that's geting shorter may fail in a way that disrupts the entire system
15122    /// (bus lockup or similar), so the caller must ensure that no DMA is
15123    /// ongoing to any portion of a range that is getting shorter, unless the
15124    /// blocks being un-covered by the modification to this range are all
15125    /// covered by other active ranges, in which case no disruption to ongoing
15126    /// DMA will occur.
15127    ///
15128    /// If a range is modified to become <= zero length, the range is deleted.
15129    ///
15130    /// Errors:
15131    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15132    ///     Unexpected heap, or old_range or new_range that doesn't conform to
15133    ///     protected_range_granularity, or old_range and new_range differ in
15134    ///     both begin and end (disallowed).
15135    ///   * UNSPECIFIED - generic internal error (such as in communication
15136    ///     with TEE which doesn't generate zx_status_t errors).
15137    ///   * NOT_FOUND - the specified range is not found.
15138    ///   * other errors are possible, such as from communication failures or
15139    ///     server propagation of failures.
15140    pub fn r#modify_secure_heap_physical_range(
15141        &self,
15142        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
15143        ___deadline: zx::MonotonicInstant,
15144    ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
15145        let _response = self.client.send_query::<
15146            SecureMemModifySecureHeapPhysicalRangeRequest,
15147            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15148        >(
15149            payload,
15150            0x60b7448aa1187734,
15151            fidl::encoding::DynamicFlags::FLEXIBLE,
15152            ___deadline,
15153        )?
15154        .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
15155        Ok(_response.map(|x| x))
15156    }
15157
15158    /// Zero a sub-range of a currently-existing physical range added via
15159    /// AddSecureHeapPhysicalRange().  The sub-range must be fully covered by
15160    /// exactly one physical range, and must not overlap with any other
15161    /// physical range.
15162    ///
15163    /// is_covering_range_explicit - When true, the covering range must be one
15164    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
15165    ///     possibly modified since.  When false, the covering range must not
15166    ///     be one of the ranges explicitly created via
15167    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
15168    ///     a covering range not created via AddSecureHeapPhysicalRange().  The
15169    ///     covering range is typically the entire physical range (or a range
15170    ///     which covers even more) of a heap configured by the TEE and whose
15171    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
15172    ///
15173    /// Ongoing DMA is not disrupted by this request.
15174    ///
15175    /// Errors:
15176    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15177    ///     Unexpected heap.
15178    ///   * UNSPECIFIED - generic internal error (such as in communication
15179    ///     with TEE which doesn't generate zx_status_t errors).
15180    ///   * other errors are possible, such as from communication failures or
15181    ///     server propagation of failures.
15182    pub fn r#zero_sub_range(
15183        &self,
15184        mut payload: &SecureMemZeroSubRangeRequest,
15185        ___deadline: zx::MonotonicInstant,
15186    ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
15187        let _response = self.client.send_query::<
15188            SecureMemZeroSubRangeRequest,
15189            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15190        >(
15191            payload,
15192            0x5b25b7901a385ce5,
15193            fidl::encoding::DynamicFlags::FLEXIBLE,
15194            ___deadline,
15195        )?
15196        .into_result::<SecureMemMarker>("zero_sub_range")?;
15197        Ok(_response.map(|x| x))
15198    }
15199}
15200
#[cfg(target_os = "fuchsia")]
impl From<SecureMemSynchronousProxy> for zx::NullableHandle {
    /// Consumes the proxy and yields its underlying channel as a handle.
    fn from(proxy: SecureMemSynchronousProxy) -> Self {
        let channel = proxy.into_channel();
        channel.into()
    }
}
15207
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for SecureMemSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(channel: fidl::Channel) -> Self {
        SecureMemSynchronousProxy::new(channel)
    }
}
15214
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for SecureMemSynchronousProxy {
    type Protocol = SecureMemMarker;

    /// Builds a synchronous proxy from a typed client endpoint.
    fn from_client(value: fidl::endpoints::ClientEnd<SecureMemMarker>) -> Self {
        let channel = value.into_channel();
        SecureMemSynchronousProxy::new(channel)
    }
}
15223
/// Asynchronous client proxy for the fuchsia.sysmem2/SecureMem protocol.
#[derive(Debug, Clone)]
pub struct SecureMemProxy {
    // Async FIDL client that performs the message I/O for this proxy.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
15228
15229impl fidl::endpoints::Proxy for SecureMemProxy {
15230    type Protocol = SecureMemMarker;
15231
15232    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
15233        Self::new(inner)
15234    }
15235
15236    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
15237        self.client.into_channel().map_err(|client| Self { client })
15238    }
15239
15240    fn as_channel(&self) -> &::fidl::AsyncChannel {
15241        self.client.as_channel()
15242    }
15243}
15244
impl SecureMemProxy {
    /// Create a new Proxy for fuchsia.sysmem2/SecureMem.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        // The protocol's DEBUG_NAME is attached to the client for use in
        // diagnostics emitted by the FIDL runtime.
        let protocol_name = <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> SecureMemEventStream {
        SecureMemEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Gets the physical address and length of any secure heap whose physical
    /// range is configured via the TEE.
    ///
    /// Presently, these will be fixed physical addresses and lengths, with the
    /// location plumbed via the TEE.
    ///
    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
    /// when there isn't any special heap-specific per-VMO setup or teardown
    /// required.
    ///
    /// The physical range must be secured/protected by the TEE before the
    /// securemem driver responds to this request with success.
    ///
    /// Sysmem should only call this once.  Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    pub fn r#get_physical_secure_heaps(
        &self,
    ) -> fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegates to the SecureMemProxyInterface impl on this proxy, which
        // performs the actual encode/send/decode.
        SecureMemProxyInterface::r#get_physical_secure_heaps(self)
    }

    /// Gets information about any secure heaps whose physical pages are not
    /// configured by the TEE, but by sysmem.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    pub fn r#get_dynamic_secure_heaps(
        &self,
    ) -> fidl::client::QueryResponseFut<
        SecureMemGetDynamicSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        SecureMemProxyInterface::r#get_dynamic_secure_heaps(self)
    }

    /// This request from sysmem to the securemem driver gets the properties of
    /// a protected/secure heap.
    ///
    /// This only handles heaps with a single contiguous physical extent.
    ///
    /// The heap's entire physical range is indicated in case this request needs
    /// some physical space to auto-detect how many ranges are REE-usable.  Any
    /// temporary HW protection ranges will be deleted before this request
    /// completes.
    ///
    /// Errors:
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    pub fn r#get_physical_secure_heap_properties(
        &self,
        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapPropertiesResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        SecureMemProxyInterface::r#get_physical_secure_heap_properties(self, payload)
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to add, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as protected
    /// before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// created.
    ///
    /// Sysmem must only call this up to once if dynamic_protection_ranges
    /// false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this multiple
    /// times as long as the current number of ranges never exceeds
    /// max_protected_range_count.
    ///
    /// The caller must not attempt to add a range that matches an
    /// already-existing range.  Added ranges can overlap each other as long as
    /// no two ranges match exactly.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called more than once when
    ///     !dynamic_protection_ranges.  Adding a heap that would cause overall
    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
    ///     range that doesn't conform to protected_range_granularity. See log.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#add_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemAddSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        SecureMemProxyInterface::r#add_secure_heap_physical_range(self, payload)
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to delete, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as not
    /// protected before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// deleted.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// If any portion of the range being deleted is not also covered by another
    /// protected range, then any ongoing DMA to any part of the entire range
    /// may be interrupted / may fail, potentially in a way that's disruptive to
    /// the entire system (bus lockup or similar, depending on device details).
    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
    /// any portion of the range being deleted, unless the caller has other
    /// active ranges covering every block of the range being deleted.  Ongoing
    /// DMA to/from blocks outside the range being deleted is never impacted by
    /// the deletion.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or range that doesn't conform to
    ///     protected_range_granularity.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#delete_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemDeleteSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        SecureMemProxyInterface::r#delete_secure_heap_physical_range(self, payload)
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to modify and its new base and length, for a heap whose physical
    /// range(s) are set up via sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure the range to cover only the new
    /// offsets before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the range was not changed.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.  Sysmem
    /// must not call this if !is_mod_protected_range_available.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// The range must only be modified at one end or the other, but not both.
    /// If the range is getting shorter, and the un-covered blocks are not
    /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's getting shorter may fail in a way that disrupts the entire system
    /// (bus lockup or similar), so the caller must ensure that no DMA is
    /// ongoing to any portion of a range that is getting shorter, unless the
    /// blocks being un-covered by the modification to this range are all
    /// covered by other active ranges, in which case no disruption to ongoing
    /// DMA will occur.
    ///
    /// If a range is modified to become <= zero length, the range is deleted.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or old_range or new_range that doesn't conform to
    ///     protected_range_granularity, or old_range and new_range differ in
    ///     both begin and end (disallowed).
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#modify_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemModifySecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        SecureMemProxyInterface::r#modify_secure_heap_physical_range(self, payload)
    }

    /// Zero a sub-range of a currently-existing physical range added via
    /// AddSecureHeapPhysicalRange().  The sub-range must be fully covered by
    /// exactly one physical range, and must not overlap with any other
    /// physical range.
    ///
    /// is_covering_range_explicit - When true, the covering range must be one
    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
    ///     possibly modified since.  When false, the covering range must not
    ///     be one of the ranges explicitly created via
    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
    ///     a covering range not created via AddSecureHeapPhysicalRange().  The
    ///     covering range is typically the entire physical range (or a range
    ///     which covers even more) of a heap configured by the TEE and whose
    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
    ///
    /// Ongoing DMA is not disrupted by this request.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#zero_sub_range(
        &self,
        mut payload: &SecureMemZeroSubRangeRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemZeroSubRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        SecureMemProxyInterface::r#zero_sub_range(self, payload)
    }
}
15515
15516impl SecureMemProxyInterface for SecureMemProxy {
15517    type GetPhysicalSecureHeapsResponseFut = fidl::client::QueryResponseFut<
15518        SecureMemGetPhysicalSecureHeapsResult,
15519        fidl::encoding::DefaultFuchsiaResourceDialect,
15520    >;
15521    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut {
15522        fn _decode(
15523            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15524        ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
15525            let _response = fidl::client::decode_transaction_body::<
15526                fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
15527                fidl::encoding::DefaultFuchsiaResourceDialect,
15528                0x38716300592073e3,
15529            >(_buf?)?
15530            .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
15531            Ok(_response.map(|x| x))
15532        }
15533        self.client.send_query_and_decode::<
15534            fidl::encoding::EmptyPayload,
15535            SecureMemGetPhysicalSecureHeapsResult,
15536        >(
15537            (),
15538            0x38716300592073e3,
15539            fidl::encoding::DynamicFlags::FLEXIBLE,
15540            _decode,
15541        )
15542    }
15543
15544    type GetDynamicSecureHeapsResponseFut = fidl::client::QueryResponseFut<
15545        SecureMemGetDynamicSecureHeapsResult,
15546        fidl::encoding::DefaultFuchsiaResourceDialect,
15547    >;
15548    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut {
15549        fn _decode(
15550            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15551        ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
15552            let _response = fidl::client::decode_transaction_body::<
15553                fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
15554                fidl::encoding::DefaultFuchsiaResourceDialect,
15555                0x1190847f99952834,
15556            >(_buf?)?
15557            .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
15558            Ok(_response.map(|x| x))
15559        }
15560        self.client.send_query_and_decode::<
15561            fidl::encoding::EmptyPayload,
15562            SecureMemGetDynamicSecureHeapsResult,
15563        >(
15564            (),
15565            0x1190847f99952834,
15566            fidl::encoding::DynamicFlags::FLEXIBLE,
15567            _decode,
15568        )
15569    }
15570
15571    type GetPhysicalSecureHeapPropertiesResponseFut = fidl::client::QueryResponseFut<
15572        SecureMemGetPhysicalSecureHeapPropertiesResult,
15573        fidl::encoding::DefaultFuchsiaResourceDialect,
15574    >;
15575    fn r#get_physical_secure_heap_properties(
15576        &self,
15577        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
15578    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut {
15579        fn _decode(
15580            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15581        ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
15582            let _response = fidl::client::decode_transaction_body::<
15583                fidl::encoding::FlexibleResultType<
15584                    SecureMemGetPhysicalSecureHeapPropertiesResponse,
15585                    Error,
15586                >,
15587                fidl::encoding::DefaultFuchsiaResourceDialect,
15588                0xc6f06889009c7bc,
15589            >(_buf?)?
15590            .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
15591            Ok(_response.map(|x| x))
15592        }
15593        self.client.send_query_and_decode::<
15594            SecureMemGetPhysicalSecureHeapPropertiesRequest,
15595            SecureMemGetPhysicalSecureHeapPropertiesResult,
15596        >(
15597            payload,
15598            0xc6f06889009c7bc,
15599            fidl::encoding::DynamicFlags::FLEXIBLE,
15600            _decode,
15601        )
15602    }
15603
15604    type AddSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
15605        SecureMemAddSecureHeapPhysicalRangeResult,
15606        fidl::encoding::DefaultFuchsiaResourceDialect,
15607    >;
15608    fn r#add_secure_heap_physical_range(
15609        &self,
15610        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
15611    ) -> Self::AddSecureHeapPhysicalRangeResponseFut {
15612        fn _decode(
15613            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15614        ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
15615            let _response = fidl::client::decode_transaction_body::<
15616                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15617                fidl::encoding::DefaultFuchsiaResourceDialect,
15618                0x35f695b9b6c7217a,
15619            >(_buf?)?
15620            .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
15621            Ok(_response.map(|x| x))
15622        }
15623        self.client.send_query_and_decode::<
15624            SecureMemAddSecureHeapPhysicalRangeRequest,
15625            SecureMemAddSecureHeapPhysicalRangeResult,
15626        >(
15627            payload,
15628            0x35f695b9b6c7217a,
15629            fidl::encoding::DynamicFlags::FLEXIBLE,
15630            _decode,
15631        )
15632    }
15633
15634    type DeleteSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
15635        SecureMemDeleteSecureHeapPhysicalRangeResult,
15636        fidl::encoding::DefaultFuchsiaResourceDialect,
15637    >;
15638    fn r#delete_secure_heap_physical_range(
15639        &self,
15640        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
15641    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut {
15642        fn _decode(
15643            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15644        ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
15645            let _response = fidl::client::decode_transaction_body::<
15646                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15647                fidl::encoding::DefaultFuchsiaResourceDialect,
15648                0xeaa58c650264c9e,
15649            >(_buf?)?
15650            .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
15651            Ok(_response.map(|x| x))
15652        }
15653        self.client.send_query_and_decode::<
15654            SecureMemDeleteSecureHeapPhysicalRangeRequest,
15655            SecureMemDeleteSecureHeapPhysicalRangeResult,
15656        >(
15657            payload,
15658            0xeaa58c650264c9e,
15659            fidl::encoding::DynamicFlags::FLEXIBLE,
15660            _decode,
15661        )
15662    }
15663
15664    type ModifySecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
15665        SecureMemModifySecureHeapPhysicalRangeResult,
15666        fidl::encoding::DefaultFuchsiaResourceDialect,
15667    >;
15668    fn r#modify_secure_heap_physical_range(
15669        &self,
15670        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
15671    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut {
15672        fn _decode(
15673            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15674        ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
15675            let _response = fidl::client::decode_transaction_body::<
15676                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15677                fidl::encoding::DefaultFuchsiaResourceDialect,
15678                0x60b7448aa1187734,
15679            >(_buf?)?
15680            .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
15681            Ok(_response.map(|x| x))
15682        }
15683        self.client.send_query_and_decode::<
15684            SecureMemModifySecureHeapPhysicalRangeRequest,
15685            SecureMemModifySecureHeapPhysicalRangeResult,
15686        >(
15687            payload,
15688            0x60b7448aa1187734,
15689            fidl::encoding::DynamicFlags::FLEXIBLE,
15690            _decode,
15691        )
15692    }
15693
15694    type ZeroSubRangeResponseFut = fidl::client::QueryResponseFut<
15695        SecureMemZeroSubRangeResult,
15696        fidl::encoding::DefaultFuchsiaResourceDialect,
15697    >;
15698    fn r#zero_sub_range(
15699        &self,
15700        mut payload: &SecureMemZeroSubRangeRequest,
15701    ) -> Self::ZeroSubRangeResponseFut {
15702        fn _decode(
15703            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15704        ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
15705            let _response = fidl::client::decode_transaction_body::<
15706                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15707                fidl::encoding::DefaultFuchsiaResourceDialect,
15708                0x5b25b7901a385ce5,
15709            >(_buf?)?
15710            .into_result::<SecureMemMarker>("zero_sub_range")?;
15711            Ok(_response.map(|x| x))
15712        }
15713        self.client
15714            .send_query_and_decode::<SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResult>(
15715                payload,
15716                0x5b25b7901a385ce5,
15717                fidl::encoding::DynamicFlags::FLEXIBLE,
15718                _decode,
15719            )
15720    }
15721}
15722
/// Stream of decoded events arriving on a `SecureMem` client channel.
pub struct SecureMemEventStream {
    // Raw message buffers from the channel; decoded into `SecureMemEvent`
    // lazily in `poll_next`.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
15726
// Unpin lets `poll_next` access `self.event_receiver` through `Pin<&mut Self>`.
impl std::marker::Unpin for SecureMemEventStream {}
15728
impl futures::stream::FusedStream for SecureMemEventStream {
    // Delegates to the underlying receiver: once it reports termination, this
    // stream yields no further events.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
15734
15735impl futures::Stream for SecureMemEventStream {
15736    type Item = Result<SecureMemEvent, fidl::Error>;
15737
15738    fn poll_next(
15739        mut self: std::pin::Pin<&mut Self>,
15740        cx: &mut std::task::Context<'_>,
15741    ) -> std::task::Poll<Option<Self::Item>> {
15742        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
15743            &mut self.event_receiver,
15744            cx
15745        )?) {
15746            Some(buf) => std::task::Poll::Ready(Some(SecureMemEvent::decode(buf))),
15747            None => std::task::Poll::Ready(None),
15748        }
15749    }
15750}
15751
/// An event received from a fuchsia.sysmem2/SecureMem server.
///
/// The decode path below only accepts flexible (unknown) events, so this is
/// the sole variant.
#[derive(Debug)]
pub enum SecureMemEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
15760
15761impl SecureMemEvent {
15762    /// Decodes a message buffer as a [`SecureMemEvent`].
15763    fn decode(
15764        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
15765    ) -> Result<SecureMemEvent, fidl::Error> {
15766        let (bytes, _handles) = buf.split_mut();
15767        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
15768        debug_assert_eq!(tx_header.tx_id, 0);
15769        match tx_header.ordinal {
15770            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
15771                Ok(SecureMemEvent::_UnknownEvent { ordinal: tx_header.ordinal })
15772            }
15773            _ => Err(fidl::Error::UnknownOrdinal {
15774                ordinal: tx_header.ordinal,
15775                protocol_name: <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
15776            }),
15777        }
15778    }
15779}
15780
/// A Stream of incoming requests for fuchsia.sysmem2/SecureMem.
pub struct SecureMemRequestStream {
    // Shared server state (channel plus shutdown flag); also cloned into the
    // control handles and responders handed out per request.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set when the channel closes or shuts down; polling afterwards panics.
    is_terminated: bool,
}
15786
// Unpin lets `poll_next` take `&mut *self` through `Pin<&mut Self>`.
impl std::marker::Unpin for SecureMemRequestStream {}
15788
impl futures::stream::FusedStream for SecureMemRequestStream {
    // True once the channel has closed or been shut down; `poll_next` sets the
    // flag before returning `None`.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
15794
impl fidl::endpoints::RequestStream for SecureMemRequestStream {
    type Protocol = SecureMemMarker;
    type ControlHandle = SecureMemControlHandle;

    /// Wraps a server channel in a fresh, non-terminated request stream.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    /// Returns a control handle sharing this stream's server state.
    fn control_handle(&self) -> Self::ControlHandle {
        SecureMemControlHandle { inner: self.inner.clone() }
    }

    /// Decomposes the stream into its shared state and termination flag.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    /// Rebuilds a stream previously taken apart by `into_inner`.
    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
15821
impl futures::Stream for SecureMemRequestStream {
    type Item = Result<SecureMemRequest, fidl::Error>;

    // Reads one message from the channel, validates its transaction header,
    // and dispatches on the method ordinal to build the matching
    // `SecureMemRequest` variant (pairing each request payload with a
    // responder that carries the transaction id).
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // End the stream if a shutdown has been requested.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled SecureMemRequestStream after completion");
        }
        // Decode into thread-local scratch buffers to avoid per-message
        // allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure terminates the stream cleanly rather than
                    // surfacing an error.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // SecureMem.GetPhysicalSecureHeaps (empty request payload)
                    0x38716300592073e3 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetPhysicalSecureHeaps {
                            responder: SecureMemGetPhysicalSecureHeapsResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SecureMem.GetDynamicSecureHeaps (empty request payload)
                    0x1190847f99952834 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetDynamicSecureHeaps {
                            responder: SecureMemGetDynamicSecureHeapsResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SecureMem.GetPhysicalSecureHeapProperties
                    0xc6f06889009c7bc => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemGetPhysicalSecureHeapPropertiesRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemGetPhysicalSecureHeapPropertiesRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetPhysicalSecureHeapProperties {
                            payload: req,
                            responder: SecureMemGetPhysicalSecureHeapPropertiesResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SecureMem.AddSecureHeapPhysicalRange
                    0x35f695b9b6c7217a => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemAddSecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemAddSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::AddSecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemAddSecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SecureMem.DeleteSecureHeapPhysicalRange
                    0xeaa58c650264c9e => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemDeleteSecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemDeleteSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::DeleteSecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemDeleteSecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SecureMem.ModifySecureHeapPhysicalRange
                    0x60b7448aa1187734 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemModifySecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemModifySecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::ModifySecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemModifySecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SecureMem.ZeroSubRange
                    0x5b25b7901a385ce5 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemZeroSubRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemZeroSubRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::ZeroSubRange {
                            payload: req,
                            responder: SecureMemZeroSubRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Unknown flexible one-way method: surface it to the
                    // server; no reply is needed.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(SecureMemRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with the
                    // framework's UnknownMethod error before surfacing it.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(SecureMemRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
16005
/// SecureMem
///
/// The client is sysmem.  The server is securemem driver.
///
/// TEE - Trusted Execution Environment.
///
/// REE - Rich Execution Environment.
///
/// Enables sysmem to call the securemem driver to get any secure heaps
/// configured via the TEE (or via the securemem driver), and set any physical
/// secure heaps configured via sysmem.
///
/// Presently, dynamically-allocated secure heaps are configured via sysmem, as
/// it starts quite early during boot and can successfully reserve contiguous
/// physical memory.  Presently, fixed-location secure heaps are configured via
/// TEE, as the plumbing goes from the bootloader to the TEE.  However, this
/// protocol intentionally doesn't care which heaps are dynamically-allocated
/// and which are fixed-location.
#[derive(Debug)]
pub enum SecureMemRequest {
    /// Gets the physical address and length of any secure heap whose physical
    /// range is configured via the TEE.
    ///
    /// Presently, these will be fixed physical addresses and lengths, with the
    /// location plumbed via the TEE.
    ///
    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
    /// when there isn't any special heap-specific per-VMO setup or teardown
    /// required.
    ///
    /// The physical range must be secured/protected by the TEE before the
    /// securemem driver responds to this request with success.
    ///
    /// Sysmem should only call this once.  Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    GetPhysicalSecureHeaps { responder: SecureMemGetPhysicalSecureHeapsResponder },
    /// Gets information about any secure heaps whose physical pages are not
    /// configured by the TEE, but by sysmem.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    GetDynamicSecureHeaps { responder: SecureMemGetDynamicSecureHeapsResponder },
    /// This request from sysmem to the securemem driver gets the properties of
    /// a protected/secure heap.
    ///
    /// This only handles heaps with a single contiguous physical extent.
    ///
    /// The heap's entire physical range is indicated in case this request needs
    /// some physical space to auto-detect how many ranges are REE-usable.  Any
    /// temporary HW protection ranges will be deleted before this request
    /// completes.
    ///
    /// Errors:
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    GetPhysicalSecureHeapProperties {
        payload: SecureMemGetPhysicalSecureHeapPropertiesRequest,
        responder: SecureMemGetPhysicalSecureHeapPropertiesResponder,
    },
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to add, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as protected
    /// before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// created.
    ///
    /// Sysmem must only call this up to once if dynamic_protection_ranges
    /// false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this multiple
    /// times as long as the current number of ranges never exceeds
    /// max_protected_range_count.
    ///
    /// The caller must not attempt to add a range that matches an
    /// already-existing range.  Added ranges can overlap each other as long as
    /// no two ranges match exactly.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called more than once when
    ///     !dynamic_protection_ranges.  Adding a heap that would cause overall
    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
    ///     range that doesn't conform to protected_range_granularity. See log.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    AddSecureHeapPhysicalRange {
        payload: SecureMemAddSecureHeapPhysicalRangeRequest,
        responder: SecureMemAddSecureHeapPhysicalRangeResponder,
    },
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to delete, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as not
    /// protected before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// deleted.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// If any portion of the range being deleted is not also covered by another
    /// protected range, then any ongoing DMA to any part of the entire range
    /// may be interrupted / may fail, potentially in a way that's disruptive to
    /// the entire system (bus lockup or similar, depending on device details).
    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
    /// any portion of the range being deleted, unless the caller has other
    /// active ranges covering every block of the range being deleted.  Ongoing
    /// DMA to/from blocks outside the range being deleted is never impacted by
    /// the deletion.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or range that doesn't conform to
    ///     protected_range_granularity.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    DeleteSecureHeapPhysicalRange {
        payload: SecureMemDeleteSecureHeapPhysicalRangeRequest,
        responder: SecureMemDeleteSecureHeapPhysicalRangeResponder,
    },
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to modify and its new base and length, for a heap whose physical
    /// range(s) are set up via sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure the range to cover only the new
    /// offsets before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the range was not changed.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.  Sysmem
    /// must not call this if !is_mod_protected_range_available.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// The range must only be modified at one end or the other, but not both.
    /// If the range is getting shorter, and the un-covered blocks are not
    /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's geting shorter may fail in a way that disrupts the entire system
    /// (bus lockup or similar), so the caller must ensure that no DMA is
    /// ongoing to any portion of a range that is getting shorter, unless the
    /// blocks being un-covered by the modification to this range are all
    /// covered by other active ranges, in which case no disruption to ongoing
    /// DMA will occur.
    ///
    /// If a range is modified to become <= zero length, the range is deleted.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or old_range or new_range that doesn't conform to
    ///     protected_range_granularity, or old_range and new_range differ in
    ///     both begin and end (disallowed).
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    ModifySecureHeapPhysicalRange {
        payload: SecureMemModifySecureHeapPhysicalRangeRequest,
        responder: SecureMemModifySecureHeapPhysicalRangeResponder,
    },
    /// Zero a sub-range of a currently-existing physical range added via
    /// AddSecureHeapPhysicalRange().  The sub-range must be fully covered by
    /// exactly one physical range, and must not overlap with any other
    /// physical range.
    ///
    /// is_covering_range_explicit - When true, the covering range must be one
    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
    ///     possibly modified since.  When false, the covering range must not
    ///     be one of the ranges explicitly created via
    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
    ///     a covering range not created via AddSecureHeapPhysicalRange().  The
    ///     covering range is typically the entire physical range (or a range
    ///     which covers even more) of a heap configured by the TEE and whose
    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
    ///
    /// Ongoing DMA is not disrupted by this request.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    ZeroSubRange {
        payload: SecureMemZeroSubRangeRequest,
        responder: SecureMemZeroSubRangeResponder,
    },
    /// An interaction was received which does not match any known method.
    #[non_exhaustive]
    _UnknownMethod {
        /// Ordinal of the method that was called.
        ordinal: u64,
        /// Control handle for the channel the unknown interaction arrived on.
        control_handle: SecureMemControlHandle,
        /// Whether the unknown interaction was one-way or two-way.
        method_type: fidl::MethodType,
    },
}
16243
16244impl SecureMemRequest {
16245    #[allow(irrefutable_let_patterns)]
16246    pub fn into_get_physical_secure_heaps(
16247        self,
16248    ) -> Option<(SecureMemGetPhysicalSecureHeapsResponder)> {
16249        if let SecureMemRequest::GetPhysicalSecureHeaps { responder } = self {
16250            Some((responder))
16251        } else {
16252            None
16253        }
16254    }
16255
16256    #[allow(irrefutable_let_patterns)]
16257    pub fn into_get_dynamic_secure_heaps(
16258        self,
16259    ) -> Option<(SecureMemGetDynamicSecureHeapsResponder)> {
16260        if let SecureMemRequest::GetDynamicSecureHeaps { responder } = self {
16261            Some((responder))
16262        } else {
16263            None
16264        }
16265    }
16266
16267    #[allow(irrefutable_let_patterns)]
16268    pub fn into_get_physical_secure_heap_properties(
16269        self,
16270    ) -> Option<(
16271        SecureMemGetPhysicalSecureHeapPropertiesRequest,
16272        SecureMemGetPhysicalSecureHeapPropertiesResponder,
16273    )> {
16274        if let SecureMemRequest::GetPhysicalSecureHeapProperties { payload, responder } = self {
16275            Some((payload, responder))
16276        } else {
16277            None
16278        }
16279    }
16280
16281    #[allow(irrefutable_let_patterns)]
16282    pub fn into_add_secure_heap_physical_range(
16283        self,
16284    ) -> Option<(
16285        SecureMemAddSecureHeapPhysicalRangeRequest,
16286        SecureMemAddSecureHeapPhysicalRangeResponder,
16287    )> {
16288        if let SecureMemRequest::AddSecureHeapPhysicalRange { payload, responder } = self {
16289            Some((payload, responder))
16290        } else {
16291            None
16292        }
16293    }
16294
16295    #[allow(irrefutable_let_patterns)]
16296    pub fn into_delete_secure_heap_physical_range(
16297        self,
16298    ) -> Option<(
16299        SecureMemDeleteSecureHeapPhysicalRangeRequest,
16300        SecureMemDeleteSecureHeapPhysicalRangeResponder,
16301    )> {
16302        if let SecureMemRequest::DeleteSecureHeapPhysicalRange { payload, responder } = self {
16303            Some((payload, responder))
16304        } else {
16305            None
16306        }
16307    }
16308
16309    #[allow(irrefutable_let_patterns)]
16310    pub fn into_modify_secure_heap_physical_range(
16311        self,
16312    ) -> Option<(
16313        SecureMemModifySecureHeapPhysicalRangeRequest,
16314        SecureMemModifySecureHeapPhysicalRangeResponder,
16315    )> {
16316        if let SecureMemRequest::ModifySecureHeapPhysicalRange { payload, responder } = self {
16317            Some((payload, responder))
16318        } else {
16319            None
16320        }
16321    }
16322
16323    #[allow(irrefutable_let_patterns)]
16324    pub fn into_zero_sub_range(
16325        self,
16326    ) -> Option<(SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResponder)> {
16327        if let SecureMemRequest::ZeroSubRange { payload, responder } = self {
16328            Some((payload, responder))
16329        } else {
16330            None
16331        }
16332    }
16333
16334    /// Name of the method defined in FIDL
16335    pub fn method_name(&self) -> &'static str {
16336        match *self {
16337            SecureMemRequest::GetPhysicalSecureHeaps { .. } => "get_physical_secure_heaps",
16338            SecureMemRequest::GetDynamicSecureHeaps { .. } => "get_dynamic_secure_heaps",
16339            SecureMemRequest::GetPhysicalSecureHeapProperties { .. } => {
16340                "get_physical_secure_heap_properties"
16341            }
16342            SecureMemRequest::AddSecureHeapPhysicalRange { .. } => "add_secure_heap_physical_range",
16343            SecureMemRequest::DeleteSecureHeapPhysicalRange { .. } => {
16344                "delete_secure_heap_physical_range"
16345            }
16346            SecureMemRequest::ModifySecureHeapPhysicalRange { .. } => {
16347                "modify_secure_heap_physical_range"
16348            }
16349            SecureMemRequest::ZeroSubRange { .. } => "zero_sub_range",
16350            SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
16351                "unknown one-way method"
16352            }
16353            SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
16354                "unknown two-way method"
16355            }
16356        }
16357    }
16358}
16359
/// Server-side control handle for a `SecureMem` connection; lets server code
/// shut the channel down (optionally with an epitaph) independently of any
/// in-flight responder. Cloning shares the same underlying connection state.
#[derive(Debug, Clone)]
pub struct SecureMemControlHandle {
    // Shared connection state, reference-counted so the handle is cheaply
    // cloneable across responders.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
16364
impl fidl::endpoints::ControlHandle for SecureMemControlHandle {
    /// Shuts down the server endpoint of the channel.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    /// Shuts down the server endpoint, sending `status` to the client as an
    /// epitaph first.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    /// Reports whether the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    /// Returns a signal handle that can be awaited for channel closure.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    /// Clears/sets signals on the peer endpoint (only available on Fuchsia,
    /// where real zircon channels exist).
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
16391
16392impl SecureMemControlHandle {}
16393
/// Responder for a single `GetPhysicalSecureHeaps` transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetPhysicalSecureHeapsResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered; echoed back in the reply.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetPhysicalSecureHeapsResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapsResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Borrows the control handle for the connection this response belongs to.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemGetPhysicalSecureHeapsResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the reply; the hex literal is this method's FIDL
    // ordinal and must match the protocol definition.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetPhysicalSecureHeapsResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x38716300592073e3,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16468
/// Responder for a single `GetDynamicSecureHeaps` transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetDynamicSecureHeapsResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered; echoed back in the reply.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetDynamicSecureHeapsResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemGetDynamicSecureHeapsResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Borrows the control handle for the connection this response belongs to.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemGetDynamicSecureHeapsResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the reply; the hex literal is this method's FIDL
    // ordinal and must match the protocol definition.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetDynamicSecureHeapsResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x1190847f99952834,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16543
/// Responder for a single `GetPhysicalSecureHeapProperties` transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetPhysicalSecureHeapPropertiesResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered; echoed back in the reply.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetPhysicalSecureHeapPropertiesResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapPropertiesResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Borrows the control handle for the connection this response belongs to.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemGetPhysicalSecureHeapPropertiesResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the reply; the hex literal is this method's FIDL
    // ordinal and must match the protocol definition.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetPhysicalSecureHeapPropertiesResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0xc6f06889009c7bc,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16618
/// Responder for a single `AddSecureHeapPhysicalRange` transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemAddSecureHeapPhysicalRangeResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered; echoed back in the reply.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemAddSecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemAddSecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Borrows the control handle for the connection this response belongs to.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemAddSecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the (empty-struct) reply; the hex literal is this
    // method's FIDL ordinal and must match the protocol definition.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x35f695b9b6c7217a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16684
/// Responder for a single `DeleteSecureHeapPhysicalRange` transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemDeleteSecureHeapPhysicalRangeResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered; echoed back in the reply.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemDeleteSecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemDeleteSecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Borrows the control handle for the connection this response belongs to.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemDeleteSecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the (empty-struct) reply; the hex literal is this
    // method's FIDL ordinal and must match the protocol definition.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16750
/// Responder for a single `ModifySecureHeapPhysicalRange` transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemModifySecureHeapPhysicalRangeResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered; echoed back in the reply.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemModifySecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemModifySecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Borrows the control handle for the connection this response belongs to.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemModifySecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the (empty-struct) reply; the hex literal is this
    // method's FIDL ordinal and must match the protocol definition.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16816
/// Responder for a single `ZeroSubRange` transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemZeroSubRangeResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered; echoed back in the reply.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemZeroSubRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemZeroSubRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Borrows the control handle for the connection this response belongs to.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemZeroSubRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the (empty-struct) reply; the hex literal is this
    // method's FIDL ordinal and must match the protocol definition.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x5b25b7901a385ce5,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16882
16883mod internal {
16884    use super::*;
16885
16886    impl AllocatorAllocateNonSharedCollectionRequest {
16887        #[inline(always)]
16888        fn max_ordinal_present(&self) -> u64 {
16889            if let Some(_) = self.collection_request {
16890                return 1;
16891            }
16892            0
16893        }
16894    }
16895
    // Resource (handle-carrying) types are borrowed mutably for encoding so
    // that handles can be moved out of the value (see `take_or_borrow` use in
    // the Encode impl below).
    impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateNonSharedCollectionRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
16904
    // Inline layout of a FIDL table: a 16-byte, 8-aligned header consisting of
    // the 8-byte max-ordinal count plus an 8-byte presence marker (matching
    // the two `write_num` calls in the Encode impl below).
    unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateNonSharedCollectionRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
16918
    // Encodes the table as a vector of envelopes: a 16-byte inline header
    // (max ordinal + presence), then `max_ordinal` 8-byte envelopes placed
    // out-of-line, one per member ordinal.
    unsafe impl
        fidl::encoding::Encode<
            AllocatorAllocateNonSharedCollectionRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorAllocateNonSharedCollectionRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorAllocateNonSharedCollectionRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            // Out-of-line content counts toward recursion depth.
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            // Shadowed: from here on `offset` addresses the envelope array.
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // `take_or_borrow` moves the server-end handle out of the table so
            // ownership transfers into the encoded message.
            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
16974
16975    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
16976        for AllocatorAllocateNonSharedCollectionRequest
16977    {
16978        #[inline(always)]
16979        fn new_empty() -> Self {
16980            Self::default()
16981        }
16982
16983        unsafe fn decode(
16984            &mut self,
16985            decoder: &mut fidl::encoding::Decoder<
16986                '_,
16987                fidl::encoding::DefaultFuchsiaResourceDialect,
16988            >,
16989            offset: usize,
16990            mut depth: fidl::encoding::Depth,
16991        ) -> fidl::Result<()> {
16992            decoder.debug_check_bounds::<Self>(offset);
16993            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
16994                None => return Err(fidl::Error::NotNullable),
16995                Some(len) => len,
16996            };
16997            // Calling decoder.out_of_line_offset(0) is not allowed.
16998            if len == 0 {
16999                return Ok(());
17000            };
17001            depth.increment()?;
17002            let envelope_size = 8;
17003            let bytes_len = len * envelope_size;
17004            let offset = decoder.out_of_line_offset(bytes_len)?;
17005            // Decode the envelope for each type.
17006            let mut _next_ordinal_to_read = 0;
17007            let mut next_offset = offset;
17008            let end_offset = offset + bytes_len;
17009            _next_ordinal_to_read += 1;
17010            if next_offset >= end_offset {
17011                return Ok(());
17012            }
17013
17014            // Decode unknown envelopes for gaps in ordinals.
17015            while _next_ordinal_to_read < 1 {
17016                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
17017                _next_ordinal_to_read += 1;
17018                next_offset += envelope_size;
17019            }
17020
17021            let next_out_of_line = decoder.next_out_of_line();
17022            let handles_before = decoder.remaining_handles();
17023            if let Some((inlined, num_bytes, num_handles)) =
17024                fidl::encoding::decode_envelope_header(decoder, next_offset)?
17025            {
17026                let member_inline_size = <fidl::encoding::Endpoint<
17027                    fidl::endpoints::ServerEnd<BufferCollectionMarker>,
17028                > as fidl::encoding::TypeMarker>::inline_size(
17029                    decoder.context
17030                );
17031                if inlined != (member_inline_size <= 4) {
17032                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
17033                }
17034                let inner_offset;
17035                let mut inner_depth = depth.clone();
17036                if inlined {
17037                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
17038                    inner_offset = next_offset;
17039                } else {
17040                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
17041                    inner_depth.increment()?;
17042                }
17043                let val_ref = self.collection_request.get_or_insert_with(|| {
17044                    fidl::new_empty!(
17045                        fidl::encoding::Endpoint<
17046                            fidl::endpoints::ServerEnd<BufferCollectionMarker>,
17047                        >,
17048                        fidl::encoding::DefaultFuchsiaResourceDialect
17049                    )
17050                });
17051                fidl::decode!(
17052                    fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
17053                    fidl::encoding::DefaultFuchsiaResourceDialect,
17054                    val_ref,
17055                    decoder,
17056                    inner_offset,
17057                    inner_depth
17058                )?;
17059                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
17060                {
17061                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
17062                }
17063                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
17064                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
17065                }
17066            }
17067
17068            next_offset += envelope_size;
17069
17070            // Decode the remaining unknown envelopes.
17071            while next_offset < end_offset {
17072                _next_ordinal_to_read += 1;
17073                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
17074                next_offset += envelope_size;
17075            }
17076
17077            Ok(())
17078        }
17079    }
17080
17081    impl AllocatorAllocateSharedCollectionRequest {
17082        #[inline(always)]
17083        fn max_ordinal_present(&self) -> u64 {
17084            if let Some(_) = self.token_request {
17085                return 1;
17086            }
17087            0
17088        }
17089    }
17090
    // `AllocatorAllocateSharedCollectionRequest` is a resource type (its
    // `token_request` member is a channel endpoint), so encoding works on
    // `&mut Self` and may move the contained handle out of the value rather
    // than copying it.
    impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateSharedCollectionRequest {
        type Borrowed<'a> = &'a mut Self;
        // Borrow the whole table mutably; individual handle members are taken
        // via their own `take_or_borrow` during `encode`.
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
17099
    // Wire-format constants for this table: 8-byte alignment and a 16-byte
    // inline part (the header `encode` writes: an 8-byte max-ordinal count
    // followed by an 8-byte presence marker).
    // SAFETY (unsafe impl): these values must agree with what `encode` and
    // `decode` actually read/write; fidlgen emits them together.
    unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateSharedCollectionRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
17113
    // Encodes `AllocatorAllocateSharedCollectionRequest` in FIDL table form:
    // a 16-byte inline header (max present ordinal + presence marker)
    // followed by one 8-byte out-of-line envelope per ordinal up to the
    // highest present member. A table with no fields set encodes as just the
    // header with a count of 0.
    unsafe impl
        fidl::encoding::Encode<
            AllocatorAllocateSharedCollectionRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorAllocateSharedCollectionRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorAllocateSharedCollectionRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            // Trailing absent members are omitted entirely (truncated table).
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Ordinal 1: `token_request` (server end of a BufferCollectionToken).
            // `take_or_borrow` hands the endpoint to the encoder; resource
            // encoding consumes its handle rather than duplicating it.
            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
17178
    // Decodes `AllocatorAllocateSharedCollectionRequest` from its FIDL table
    // wire form: a vector-style header (count + presence) followed by one
    // 8-byte envelope per ordinal. Envelopes for ordinals this binding does
    // not know are skipped via `decode_unknown_envelope`, so tables from
    // newer peers with extra fields are tolerated.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorAllocateSharedCollectionRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are encoded as non-nullable envelope vectors; reject null.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1: `token_request` (server end of a BufferCollectionToken).
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Values of <= 4 bytes are stored inline in the envelope itself;
                // the inline bit on the wire must agree with the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.token_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope's declared byte/handle counts match what
                // decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
17286
17287    impl AllocatorBindSharedCollectionRequest {
17288        #[inline(always)]
17289        fn max_ordinal_present(&self) -> u64 {
17290            if let Some(_) = self.buffer_collection_request {
17291                return 2;
17292            }
17293            if let Some(_) = self.token {
17294                return 1;
17295            }
17296            0
17297        }
17298    }
17299
    // `AllocatorBindSharedCollectionRequest` is a resource type (its `token`
    // and `buffer_collection_request` members are channel endpoints), so
    // encoding works on `&mut Self` and may move the contained handles out of
    // the value rather than copying them.
    impl fidl::encoding::ResourceTypeMarker for AllocatorBindSharedCollectionRequest {
        type Borrowed<'a> = &'a mut Self;
        // Borrow the whole table mutably; individual handle members are taken
        // via their own `take_or_borrow` during `encode`.
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
17308
    // Wire-format constants for this table: 8-byte alignment and a 16-byte
    // inline part (the header `encode` writes: an 8-byte max-ordinal count
    // followed by an 8-byte presence marker).
    // SAFETY (unsafe impl): these values must agree with what `encode` and
    // `decode` actually read/write; fidlgen emits them together.
    unsafe impl fidl::encoding::TypeMarker for AllocatorBindSharedCollectionRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
17322
    // Encodes `AllocatorBindSharedCollectionRequest` in FIDL table form: a
    // 16-byte inline header (max present ordinal + presence marker) followed
    // by one 8-byte out-of-line envelope per ordinal up to the highest
    // present member (`token` = ordinal 1, `buffer_collection_request` =
    // ordinal 2).
    unsafe impl
        fidl::encoding::Encode<
            AllocatorBindSharedCollectionRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorBindSharedCollectionRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorBindSharedCollectionRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            // Trailing absent members are omitted entirely (truncated table).
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Ordinal 1: `token` (client end of a BufferCollectionToken).
            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Ordinal 2: `buffer_collection_request` (server end of a
            // BufferCollection).
            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.buffer_collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
17408
    // Decodes `AllocatorBindSharedCollectionRequest` from its FIDL table wire
    // form: a vector-style header (count + presence) followed by one 8-byte
    // envelope per ordinal (`token` = 1, `buffer_collection_request` = 2).
    // Unknown-ordinal envelopes are skipped via `decode_unknown_envelope`, so
    // tables from newer peers with extra fields are tolerated.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorBindSharedCollectionRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are encoded as non-nullable envelope vectors; reject null.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1: `token` (client end of a BufferCollectionToken).
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Values of <= 4 bytes are stored inline in the envelope itself;
                // the inline bit on the wire must agree with the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.token.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope's declared byte/handle counts match what
                // decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            // A table truncated after ordinal 1 is valid: remaining fields absent.
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 2: `buffer_collection_request` (server end of a
            // BufferCollection).
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffer_collection_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
17576
17577    impl AllocatorGetVmoInfoRequest {
17578        #[inline(always)]
17579        fn max_ordinal_present(&self) -> u64 {
17580            if let Some(_) = self.vmo {
17581                return 1;
17582            }
17583            0
17584        }
17585    }
17586
    // `AllocatorGetVmoInfoRequest` is a resource type (its `vmo` member is a
    // handle), so encoding works on `&mut Self` and may move the contained
    // handle out of the value rather than copying it.
    impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoRequest {
        type Borrowed<'a> = &'a mut Self;
        // Borrow the whole table mutably; the handle member is taken via its
        // own `take_or_borrow` during `encode`.
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
17595
    // Wire-format constants for this table: 8-byte alignment and a 16-byte
    // inline part (the header `encode` writes: an 8-byte max-ordinal count
    // followed by an 8-byte presence marker).
    // SAFETY (unsafe impl): these values must agree with what `encode` and
    // `decode` actually read/write; fidlgen emits them together.
    unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
17609
    // Encodes `AllocatorGetVmoInfoRequest` in FIDL table form: a 16-byte
    // inline header (max present ordinal + presence marker) followed by one
    // 8-byte out-of-line envelope per ordinal. The only member, `vmo`
    // (ordinal 1), is a VMO handle; it is taken out of `self` for encoding.
    unsafe impl
        fidl::encoding::Encode<
            AllocatorGetVmoInfoRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorGetVmoInfoRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorGetVmoInfoRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            // Trailing absent members are omitted entirely (truncated table).
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Ordinal 1: `vmo`. The rights mask 2147483648 = 0x8000_0000 is
            // presumably ZX_RIGHT_SAME_RIGHTS — verify against zx rights
            // constants if this ever needs changing.
            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.vmo.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Vmo,
                        { fidl::ObjectType::VMO.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
17680
    // Decodes `AllocatorGetVmoInfoRequest` from its FIDL table wire form: a
    // vector-style header (count + presence) followed by one 8-byte envelope
    // per ordinal. Ordinal 1 carries the `vmo` handle. Unknown-ordinal
    // envelopes are skipped via `decode_unknown_envelope`, so tables from
    // newer peers with extra fields are tolerated.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorGetVmoInfoRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are encoded as non-nullable envelope vectors; reject null.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1: `vmo` (VMO handle).
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Values of <= 4 bytes are stored inline in the envelope itself;
                // the inline bit on the wire must agree with the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // Verify the envelope's declared byte/handle counts match what
                // decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
17775
17776    impl AllocatorGetVmoInfoResponse {
17777        #[inline(always)]
17778        fn max_ordinal_present(&self) -> u64 {
17779            if let Some(_) = self.close_weak_asap {
17780                return 3;
17781            }
17782            if let Some(_) = self.buffer_index {
17783                return 2;
17784            }
17785            if let Some(_) = self.buffer_collection_id {
17786                return 1;
17787            }
17788            0
17789        }
17790    }
17791
    // Resource (handle-carrying) types are encoded from a mutable borrow so the
    // encoder can move handles out of the value rather than duplicate them.
    impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoResponse {
        type Borrowed<'a> = &'a mut Self;
        // Hands the encoder a mutable borrow of the owned value; no copy is made.
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
17800
    // Wire-format layout constants for this table type.
    unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoResponse {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            // Tables are 8-byte aligned: their inline form is a vector header.
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            // Inline form is a 16-byte vector header (count + presence marker);
            // the envelopes themselves live out of line.
            16
        }
    }
17814
    // Encoder for the AllocatorGetVmoInfoResponse table: writes a vector header,
    // then one 8-byte envelope per ordinal up to the highest ordinal present.
    // Takes `&mut` because ordinal 3 moves a handle out of the value.
    unsafe impl
        fidl::encoding::Encode<
            AllocatorGetVmoInfoResponse,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorGetVmoInfoResponse
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorGetVmoInfoResponse>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Ordinal 1: buffer_collection_id (u64, value type — encoded by borrow).
            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffer_collection_id
                    .as_ref()
                    .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Ordinal 2: buffer_index (u64, value type — encoded by borrow).
            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffer_index.as_ref().map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 3 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (3 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Ordinal 3: close_weak_asap (eventpair handle, resource type — the
            // handle is taken out of `self` via take_or_borrow).
            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.close_weak_asap.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::EventPair,
                        { fidl::ObjectType::EVENTPAIR.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
17939
    // Decoder for the AllocatorGetVmoInfoResponse table: reads the vector
    // header, then walks the envelope list in ordinal order, skipping unknown
    // ordinals so newer peers remain wire-compatible.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorGetVmoInfoResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are non-nullable on the wire; an absent vector is an error.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // Ordinal 1: buffer_collection_id (u64).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                // The envelope's inline bit must agree with the member's size
                // (payloads of <= 4 bytes are stored inline in the envelope).
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u64,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope's declared byte/handle counts against
                // what was actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 2: buffer_index (u64).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffer_index.get_or_insert_with(|| {
                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u64,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 3: close_weak_asap (eventpair handle).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 3 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
18138
18139    impl BufferCollectionAttachLifetimeTrackingRequest {
18140        #[inline(always)]
18141        fn max_ordinal_present(&self) -> u64 {
18142            if let Some(_) = self.buffers_remaining {
18143                return 2;
18144            }
18145            if let Some(_) = self.server_end {
18146                return 1;
18147            }
18148            0
18149        }
18150    }
18151
    // Resource (handle-carrying) types are encoded from a mutable borrow so the
    // encoder can move handles out of the value rather than duplicate them.
    impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
        type Borrowed<'a> = &'a mut Self;
        // Hands the encoder a mutable borrow of the owned value; no copy is made.
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
18160
    // Wire-format layout constants for this table type.
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            // Tables are 8-byte aligned: their inline form is a vector header.
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            // Inline form is a 16-byte vector header (count + presence marker);
            // the envelopes themselves live out of line.
            16
        }
    }
18174
    // Encoder for the BufferCollectionAttachLifetimeTrackingRequest table:
    // writes a vector header, then one 8-byte envelope per ordinal up to the
    // highest ordinal present. Takes `&mut` because ordinal 1 moves a handle
    // out of the value.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionAttachLifetimeTrackingRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionAttachLifetimeTrackingRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionAttachLifetimeTrackingRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Ordinal 1: server_end (eventpair handle, resource type — the
            // handle is taken out of `self` via take_or_borrow).
            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.server_end.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::EventPair,
                        { fidl::ObjectType::EVENTPAIR.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Ordinal 2: buffers_remaining (u32, value type — encoded by borrow).
            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                u32,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffers_remaining
                    .as_ref()
                    .map(<u32 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
18273
    // Decoder for the BufferCollectionAttachLifetimeTrackingRequest table:
    // reads the vector header, then walks the envelope list in ordinal order,
    // skipping unknown ordinals so newer peers remain wire-compatible.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionAttachLifetimeTrackingRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are non-nullable on the wire; an absent vector is an error.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // Ordinal 1: server_end (eventpair handle).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // The envelope's inline bit must agree with the member's size
                // (payloads of <= 4 bytes are stored inline in the envelope).
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // Verify the envelope's declared byte/handle counts against
                // what was actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 2: buffers_remaining (u32).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u32 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffers_remaining.get_or_insert_with(|| {
                    fidl::new_empty!(u32, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u32,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
18420
18421    impl BufferCollectionAttachTokenRequest {
18422        #[inline(always)]
18423        fn max_ordinal_present(&self) -> u64 {
18424            if let Some(_) = self.token_request {
18425                return 2;
18426            }
18427            if let Some(_) = self.rights_attenuation_mask {
18428                return 1;
18429            }
18430            0
18431        }
18432    }
18433
    // Resource (handle-carrying) types are encoded from a mutable borrow so the
    // encoder can move handles out of the value rather than duplicate them.
    impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachTokenRequest {
        type Borrowed<'a> = &'a mut Self;
        // Hands the encoder a mutable borrow of the owned value; no copy is made.
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
18442
    // Wire-format metadata for `BufferCollectionAttachTokenRequest`. Its
    // inline part is the 16-byte table header (8-byte max-ordinal count plus
    // 8-byte presence marker, as written by the Encode impl), aligned to 8.
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachTokenRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
18456
    // Encodes `BufferCollectionAttachTokenRequest` in FIDL table form: a
    // 16-byte inline header (max ordinal + presence marker) followed by an
    // out-of-line run of 8-byte envelopes, one per ordinal up to the highest
    // field that is set. Takes `&mut` so the `token_request` endpoint handle
    // can be moved out of `self` via `take_or_borrow`.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionAttachTokenRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionAttachTokenRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionAttachTokenRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `rights_attenuation_mask` — a plain value, encoded
            // from a shared borrow.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.rights_attenuation_mask
                    .as_ref()
                    .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 2: `token_request` — a resource (server endpoint), so it
            // is encoded via take_or_borrow to allow handle transfer.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
18549
    // Decodes `BufferCollectionAttachTokenRequest` from FIDL table wire form:
    // a vector header giving the envelope count, then one 8-byte envelope per
    // ordinal (1 = rights_attenuation_mask, 2 = token_request). Gap and
    // trailing envelopes for unknown ordinals are consumed and discarded.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionAttachTokenRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // A table's envelope vector must be present; null is rejected.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // Ordinal 1: `rights_attenuation_mask`.
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                // Values of <= 4 bytes must be stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                    fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    fidl::Rights,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // The envelope's declared byte/handle counts must match what
                // decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 2: `token_request`.
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.token_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
18709
18710    impl BufferCollectionInfo {
18711        #[inline(always)]
18712        fn max_ordinal_present(&self) -> u64 {
18713            if let Some(_) = self.buffer_collection_id {
18714                return 3;
18715            }
18716            if let Some(_) = self.buffers {
18717                return 2;
18718            }
18719            if let Some(_) = self.settings {
18720                return 1;
18721            }
18722            0
18723        }
18724    }
18725
    // `BufferCollectionInfo` carries Zircon handles (the VMOs inside
    // `buffers`), so it is encoded as a resource type: the encoder borrows
    // the value mutably so handles can be moved out of it.
    impl fidl::encoding::ResourceTypeMarker for BufferCollectionInfo {
        type Borrowed<'a> = &'a mut Self;
        // No copying here: the mutable reference itself is the borrowed form;
        // handle-carrying members are taken individually during encode.
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
18734
    // Wire-format metadata for `BufferCollectionInfo`. Its inline part is the
    // 16-byte table header (8-byte max-ordinal count plus 8-byte presence
    // marker, as written by the Encode impl), aligned to 8.
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionInfo {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
18748
    // Encodes `BufferCollectionInfo` in FIDL table form: a 16-byte inline
    // header (max ordinal + presence marker) followed by an out-of-line run
    // of 8-byte envelopes, one per ordinal up to the highest field that is
    // set. Takes `&mut` so the VMO handles in `buffers` can be moved out of
    // `self` via `take_or_borrow`.
    unsafe impl
        fidl::encoding::Encode<BufferCollectionInfo, fidl::encoding::DefaultFuchsiaResourceDialect>
        for &mut BufferCollectionInfo
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionInfo>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `settings` — a plain value, encoded from a shared
            // borrow.
            fidl::encoding::encode_in_envelope_optional::<
                SingleBufferSettings,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.settings
                    .as_ref()
                    .map(<SingleBufferSettings as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 2: `buffers` — a resource (vector of up to 128
            // VmoBuffers), encoded via take_or_borrow to allow handle transfer.
            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.buffers.as_mut().map(<fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 3 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (3 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 3: `buffer_collection_id` — a plain value, encoded from
            // a shared borrow.
            fidl::encoding::encode_in_envelope_optional::<
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffer_collection_id
                    .as_ref()
                    .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
18858
    // Decodes `BufferCollectionInfo` from FIDL table wire form: a vector
    // header giving the envelope count, then one 8-byte envelope per ordinal
    // (1 = settings, 2 = buffers, 3 = buffer_collection_id). Gap and trailing
    // envelopes for unknown ordinals are consumed and discarded.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionInfo
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // A table's envelope vector must be present; null is rejected.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // Ordinal 1: `settings`.
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <SingleBufferSettings as fidl::encoding::TypeMarker>::inline_size(
                        decoder.context,
                    );
                // Values of <= 4 bytes must be stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.settings.get_or_insert_with(|| {
                    fidl::new_empty!(
                        SingleBufferSettings,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    SingleBufferSettings,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // The envelope's declared byte/handle counts must match what
                // decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 2: `buffers`.
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.buffers.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 3: `buffer_collection_id`.
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 3 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u64,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
19056
19057    impl BufferCollectionSetConstraintsRequest {
19058        #[inline(always)]
19059        fn max_ordinal_present(&self) -> u64 {
19060            if let Some(_) = self.constraints {
19061                return 1;
19062            }
19063            0
19064        }
19065    }
19066
    // `BufferCollectionSetConstraintsRequest` is encoded through the
    // resource-type path (the encoder borrows the value mutably). NOTE(review):
    // the only member visible here (`constraints`) is encoded as a plain value
    // in the Encode impl; presumably the resource classification comes from
    // the FIDL library definition — confirm against the .fidl source.
    impl fidl::encoding::ResourceTypeMarker for BufferCollectionSetConstraintsRequest {
        type Borrowed<'a> = &'a mut Self;
        // No copying here: the mutable reference itself is the borrowed form.
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
19075
    // Wire-format metadata for `BufferCollectionSetConstraintsRequest`. Its
    // inline part is the 16-byte table header (8-byte max-ordinal count plus
    // 8-byte presence marker, as written by the Encode impl), aligned to 8.
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionSetConstraintsRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
19089
    // Encodes `BufferCollectionSetConstraintsRequest` in FIDL table form: a
    // 16-byte inline header (max ordinal + presence marker) followed by one
    // out-of-line 8-byte envelope when `constraints` (ordinal 1) is set.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionSetConstraintsRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionSetConstraintsRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionSetConstraintsRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `constraints` — a plain value, encoded from a shared
            // borrow.
            fidl::encoding::encode_in_envelope_optional::<
                BufferCollectionConstraints,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.constraints
                    .as_ref()
                    .map(<BufferCollectionConstraints as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
19152
    // Wire-format decoder for the BufferCollectionSetConstraintsRequest table.
    // FIDL tables are encoded as a vector of 8-byte envelopes indexed by
    // one-based ordinal; envelopes for ordinals this binding does not know are
    // skipped, which is what keeps tables forward/backward compatible.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionSetConstraintsRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // A table's inline form is a vector header; a null vector is not a
            // legal table encoding.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot out-of-line position and handle count so the envelope's
            // declared sizes can be validated after decoding the member.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <BufferCollectionConstraints as fidl::encoding::TypeMarker>::inline_size(
                        decoder.context,
                    );
                // Values that fit in 4 bytes must be stored inline in the
                // envelope; the sender's "inlined" bit has to agree.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Ordinal 1: `constraints` — decode in place, default-initializing
                // the field if this is the first time it is seen.
                let val_ref = self.constraints.get_or_insert_with(|| {
                    fidl::new_empty!(
                        BufferCollectionConstraints,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    BufferCollectionConstraints,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // The envelope header's byte/handle counts must match what the
                // member decode actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
19255
19256    impl BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
19257        #[inline(always)]
19258        fn max_ordinal_present(&self) -> u64 {
19259            if let Some(_) = self.group_request {
19260                return 1;
19261            }
19262            0
19263        }
19264    }
19265
19266    impl fidl::encoding::ResourceTypeMarker
19267        for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
19268    {
19269        type Borrowed<'a> = &'a mut Self;
19270        fn take_or_borrow<'a>(
19271            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
19272        ) -> Self::Borrowed<'a> {
19273            value
19274        }
19275    }
19276
19277    unsafe impl fidl::encoding::TypeMarker
19278        for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
19279    {
19280        type Owned = Self;
19281
19282        #[inline(always)]
19283        fn inline_align(_context: fidl::encoding::Context) -> usize {
19284            8
19285        }
19286
19287        #[inline(always)]
19288        fn inline_size(_context: fidl::encoding::Context) -> usize {
19289            16
19290        }
19291    }
19292
    // Wire-format encoder for the BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
    // table: a 16-byte vector header followed by out-of-line envelopes, one per
    // ordinal up to the highest field that is set.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder
                .debug_check_bounds::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
                    offset,
                );
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `group_request` — a server-end handle, so it is taken
            // by `&mut` (take_or_borrow) and moved out during encoding.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.group_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
19362
    // Wire-format decoder for the BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
    // table; unknown ordinals are consumed (handles closed) and ignored for
    // forward compatibility.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // A table is a non-nullable vector of envelopes.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot counters so the envelope's declared byte/handle counts
            // can be checked after the member decode.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Values that fit in 4 bytes are stored inline in the envelope;
                // the sender's inline bit must match.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Ordinal 1: `group_request`.
                let val_ref = self.group_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Validate the envelope's declared sizes against actual consumption.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
19470
19471    impl BufferCollectionTokenDuplicateRequest {
19472        #[inline(always)]
19473        fn max_ordinal_present(&self) -> u64 {
19474            if let Some(_) = self.token_request {
19475                return 2;
19476            }
19477            if let Some(_) = self.rights_attenuation_mask {
19478                return 1;
19479            }
19480            0
19481        }
19482    }
19483
19484    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateRequest {
19485        type Borrowed<'a> = &'a mut Self;
19486        fn take_or_borrow<'a>(
19487            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
19488        ) -> Self::Borrowed<'a> {
19489            value
19490        }
19491    }
19492
19493    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateRequest {
19494        type Owned = Self;
19495
19496        #[inline(always)]
19497        fn inline_align(_context: fidl::encoding::Context) -> usize {
19498            8
19499        }
19500
19501        #[inline(always)]
19502        fn inline_size(_context: fidl::encoding::Context) -> usize {
19503            16
19504        }
19505    }
19506
    // Wire-format encoder for the BufferCollectionTokenDuplicateRequest table:
    // vector header, then one envelope per ordinal up to max_ordinal_present()
    // (ordinal 1 = rights_attenuation_mask, ordinal 2 = token_request).
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionTokenDuplicateRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionTokenDuplicateRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionTokenDuplicateRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `rights_attenuation_mask` — a plain value type, encoded
            // by shared reference.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.rights_attenuation_mask
                    .as_ref()
                    .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 2: `token_request` — a server-end handle, taken by `&mut`
            // (take_or_borrow) so the handle is moved out during encoding.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
19599
    // Wire-format decoder for the BufferCollectionTokenDuplicateRequest table
    // (ordinal 1 = rights_attenuation_mask, ordinal 2 = token_request); unknown
    // ordinals are consumed and ignored for forward compatibility.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionTokenDuplicateRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // A table is a non-nullable vector of envelopes.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot counters so the envelope's declared byte/handle counts
            // can be validated after the member decode.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                // Values that fit in 4 bytes are stored inline in the envelope;
                // the sender's inline bit must match.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Ordinal 1: `rights_attenuation_mask`.
                let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                    fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    fidl::Rights,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Re-snapshot counters before decoding the next envelope.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Ordinal 2: `token_request`.
                let val_ref = self.token_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
19759
19760    impl BufferCollectionTokenGroupCreateChildRequest {
19761        #[inline(always)]
19762        fn max_ordinal_present(&self) -> u64 {
19763            if let Some(_) = self.rights_attenuation_mask {
19764                return 2;
19765            }
19766            if let Some(_) = self.token_request {
19767                return 1;
19768            }
19769            0
19770        }
19771    }
19772
19773    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildRequest {
19774        type Borrowed<'a> = &'a mut Self;
19775        fn take_or_borrow<'a>(
19776            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
19777        ) -> Self::Borrowed<'a> {
19778            value
19779        }
19780    }
19781
19782    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildRequest {
19783        type Owned = Self;
19784
19785        #[inline(always)]
19786        fn inline_align(_context: fidl::encoding::Context) -> usize {
19787            8
19788        }
19789
19790        #[inline(always)]
19791        fn inline_size(_context: fidl::encoding::Context) -> usize {
19792            16
19793        }
19794    }
19795
    // Wire-format encoder for the BufferCollectionTokenGroupCreateChildRequest
    // table: vector header, then one envelope per ordinal up to
    // max_ordinal_present() (ordinal 1 = token_request, ordinal 2 =
    // rights_attenuation_mask).
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionTokenGroupCreateChildRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionTokenGroupCreateChildRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionTokenGroupCreateChildRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `token_request` — a server-end handle, taken by `&mut`
            // (take_or_borrow) so the handle is moved out during encoding.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 2: `rights_attenuation_mask` — a plain value type, encoded
            // by shared reference.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.rights_attenuation_mask
                    .as_ref()
                    .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
19888
    // Table decode for BufferCollectionTokenGroupCreateChildRequest.
    // NOTE(review): machine-generated by fidlgen — regenerate instead of hand-editing
    // the logic; the comments below are review annotations only.
19889    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
19890        for BufferCollectionTokenGroupCreateChildRequest
19891    {
19892        #[inline(always)]
19893        fn new_empty() -> Self {
19894            Self::default()
19895        }
19896
        // Decodes a FIDL table in place: a vector-like header followed by one
        // 8-byte envelope per ordinal, stopping early when the wire payload ends.
19897        unsafe fn decode(
19898            &mut self,
19899            decoder: &mut fidl::encoding::Decoder<
19900                '_,
19901                fidl::encoding::DefaultFuchsiaResourceDialect,
19902            >,
19903            offset: usize,
19904            mut depth: fidl::encoding::Depth,
19905        ) -> fidl::Result<()> {
19906            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the highest ordinal present on the wire; a null vector
            // header is invalid for a table.
19907            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
19908                None => return Err(fidl::Error::NotNullable),
19909                Some(len) => len,
19910            };
19911            // Calling decoder.out_of_line_offset(0) is not allowed.
19912            if len == 0 {
19913                return Ok(());
19914            };
19915            depth.increment()?;
19916            let envelope_size = 8;
19917            let bytes_len = len * envelope_size;
19918            let offset = decoder.out_of_line_offset(bytes_len)?;
19919            // Decode the envelope for each type.
19920            let mut _next_ordinal_to_read = 0;
19921            let mut next_offset = offset;
19922            let end_offset = offset + bytes_len;
            // Ordinal 1: token_request.
19923            _next_ordinal_to_read += 1;
19924            if next_offset >= end_offset {
19925                return Ok(());
19926            }
19927
19928            // Decode unknown envelopes for gaps in ordinals.
19929            while _next_ordinal_to_read < 1 {
19930                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19931                _next_ordinal_to_read += 1;
19932                next_offset += envelope_size;
19933            }
19934
19935            let next_out_of_line = decoder.next_out_of_line();
19936            let handles_before = decoder.remaining_handles();
19937            if let Some((inlined, num_bytes, num_handles)) =
19938                fidl::encoding::decode_envelope_header(decoder, next_offset)?
19939            {
19940                let member_inline_size = <fidl::encoding::Endpoint<
19941                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19942                > as fidl::encoding::TypeMarker>::inline_size(
19943                    decoder.context
19944                );
                // Values of <= 4 bytes travel inline in the envelope itself; the
                // header's inline bit must agree with the member's size.
19945                if inlined != (member_inline_size <= 4) {
19946                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
19947                }
19948                let inner_offset;
19949                let mut inner_depth = depth.clone();
19950                if inlined {
19951                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
19952                    inner_offset = next_offset;
19953                } else {
19954                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
19955                    inner_depth.increment()?;
19956                }
19957                let val_ref = self.token_request.get_or_insert_with(|| {
19958                    fidl::new_empty!(
19959                        fidl::encoding::Endpoint<
19960                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19961                        >,
19962                        fidl::encoding::DefaultFuchsiaResourceDialect
19963                    )
19964                });
19965                fidl::decode!(
19966                    fidl::encoding::Endpoint<
19967                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19968                    >,
19969                    fidl::encoding::DefaultFuchsiaResourceDialect,
19970                    val_ref,
19971                    decoder,
19972                    inner_offset,
19973                    inner_depth
19974                )?;
                // Verify the envelope's declared byte/handle counts match what
                // decoding actually consumed.
19975                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
19976                {
19977                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
19978                }
19979                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
19980                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
19981                }
19982            }
19983
19984            next_offset += envelope_size;
            // Ordinal 2: rights_attenuation_mask.
19985            _next_ordinal_to_read += 1;
19986            if next_offset >= end_offset {
19987                return Ok(());
19988            }
19989
19990            // Decode unknown envelopes for gaps in ordinals.
19991            while _next_ordinal_to_read < 2 {
19992                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19993                _next_ordinal_to_read += 1;
19994                next_offset += envelope_size;
19995            }
19996
19997            let next_out_of_line = decoder.next_out_of_line();
19998            let handles_before = decoder.remaining_handles();
19999            if let Some((inlined, num_bytes, num_handles)) =
20000                fidl::encoding::decode_envelope_header(decoder, next_offset)?
20001            {
20002                let member_inline_size =
20003                    <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
20004                if inlined != (member_inline_size <= 4) {
20005                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
20006                }
20007                let inner_offset;
20008                let mut inner_depth = depth.clone();
20009                if inlined {
20010                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
20011                    inner_offset = next_offset;
20012                } else {
20013                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
20014                    inner_depth.increment()?;
20015                }
20016                let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
20017                    fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
20018                });
20019                fidl::decode!(
20020                    fidl::Rights,
20021                    fidl::encoding::DefaultFuchsiaResourceDialect,
20022                    val_ref,
20023                    decoder,
20024                    inner_offset,
20025                    inner_depth
20026                )?;
20027                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
20028                {
20029                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
20030                }
20031                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
20032                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
20033                }
20034            }
20035
20036            next_offset += envelope_size;
20037
20038            // Decode the remaining unknown envelopes.
20039            while next_offset < end_offset {
20040                _next_ordinal_to_read += 1;
20041                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20042                next_offset += envelope_size;
20043            }
20044
20045            Ok(())
20046        }
20047    }
20048
20049    impl BufferCollectionTokenGroupCreateChildrenSyncResponse {
20050        #[inline(always)]
20051        fn max_ordinal_present(&self) -> u64 {
20052            if let Some(_) = self.tokens {
20053                return 1;
20054            }
20055            0
20056        }
20057    }
20058
    // Resource types are encoded via `&mut Self` so that handle-bearing fields
    // can be moved out of the value during encoding (generated by fidlgen).
20059    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
20060        type Borrowed<'a> = &'a mut Self;
        // Identity: the mutable reference itself is the "borrowed" form.
20061        fn take_or_borrow<'a>(
20062            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20063        ) -> Self::Borrowed<'a> {
20064            value
20065        }
20066    }
20067
    // Wire-layout constants for this table type (generated by fidlgen).
    // The inline form is the 16-byte header written by `encode` below
    // (max-ordinal u64 at +0, presence marker u64 at +8), aligned to 8.
20068    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
20069        type Owned = Self;
20070
20071        #[inline(always)]
20072        fn inline_align(_context: fidl::encoding::Context) -> usize {
20073            8
20074        }
20075
20076        #[inline(always)]
20077        fn inline_size(_context: fidl::encoding::Context) -> usize {
20078            16
20079        }
20080    }
20081
    // Table encode for BufferCollectionTokenGroupCreateChildrenSyncResponse:
    // writes the 16-byte header, then one 8-byte envelope per present ordinal.
    // NOTE(review): machine-generated by fidlgen — regenerate rather than editing.
20082    unsafe impl
20083        fidl::encoding::Encode<
20084            BufferCollectionTokenGroupCreateChildrenSyncResponse,
20085            fidl::encoding::DefaultFuchsiaResourceDialect,
20086        > for &mut BufferCollectionTokenGroupCreateChildrenSyncResponse
20087    {
20088        unsafe fn encode(
20089            self,
20090            encoder: &mut fidl::encoding::Encoder<
20091                '_,
20092                fidl::encoding::DefaultFuchsiaResourceDialect,
20093            >,
20094            offset: usize,
20095            mut depth: fidl::encoding::Depth,
20096        ) -> fidl::Result<()> {
20097            encoder
20098                .debug_check_bounds::<BufferCollectionTokenGroupCreateChildrenSyncResponse>(offset);
20099            // Vector header
20100            let max_ordinal: u64 = self.max_ordinal_present();
20101            encoder.write_num(max_ordinal, offset);
20102            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
20103            // Calling encoder.out_of_line_offset(0) is not allowed.
20104            if max_ordinal == 0 {
20105                return Ok(());
20106            }
20107            depth.increment()?;
20108            let envelope_size = 8;
20109            let bytes_len = max_ordinal as usize * envelope_size;
20110            #[allow(unused_variables)]
20111            let offset = encoder.out_of_line_offset(bytes_len);
20112            let mut _prev_end_offset: usize = 0;
            // Ordinal 1: tokens (vector of client-end handles, max 64 elements).
20113            if 1 > max_ordinal {
20114                return Ok(());
20115            }
20116
20117            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
20118            // are envelope_size bytes.
20119            let cur_offset: usize = (1 - 1) * envelope_size;
20120
20121            // Zero reserved fields.
20122            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
20123
20124            // Safety:
20125            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
20126            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
20127            //   envelope_size bytes, there is always sufficient room.
20128            fidl::encoding::encode_in_envelope_optional::<
20129                fidl::encoding::Vector<
20130                    fidl::encoding::Endpoint<
20131                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20132                    >,
20133                    64,
20134                >,
20135                fidl::encoding::DefaultFuchsiaResourceDialect,
20136            >(
                // take_or_borrow moves the handles out of `self` for encoding.
20137                self.tokens.as_mut().map(
20138                    <fidl::encoding::Vector<
20139                        fidl::encoding::Endpoint<
20140                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20141                        >,
20142                        64,
20143                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
20144                ),
20145                encoder,
20146                offset + cur_offset,
20147                depth,
20148            )?;
20149
20150            _prev_end_offset = cur_offset + envelope_size;
20151
20152            Ok(())
20153        }
20154    }
20155
    // Table decode for BufferCollectionTokenGroupCreateChildrenSyncResponse.
    // NOTE(review): machine-generated by fidlgen — regenerate rather than editing.
20156    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
20157        for BufferCollectionTokenGroupCreateChildrenSyncResponse
20158    {
20159        #[inline(always)]
20160        fn new_empty() -> Self {
20161            Self::default()
20162        }
20163
        // Decodes the table header, then the single known ordinal (1: tokens),
        // skipping unknown envelopes for forward compatibility.
20164        unsafe fn decode(
20165            &mut self,
20166            decoder: &mut fidl::encoding::Decoder<
20167                '_,
20168                fidl::encoding::DefaultFuchsiaResourceDialect,
20169            >,
20170            offset: usize,
20171            mut depth: fidl::encoding::Depth,
20172        ) -> fidl::Result<()> {
20173            decoder.debug_check_bounds::<Self>(offset);
20174            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
20175                None => return Err(fidl::Error::NotNullable),
20176                Some(len) => len,
20177            };
20178            // Calling decoder.out_of_line_offset(0) is not allowed.
20179            if len == 0 {
20180                return Ok(());
20181            };
20182            depth.increment()?;
20183            let envelope_size = 8;
20184            let bytes_len = len * envelope_size;
20185            let offset = decoder.out_of_line_offset(bytes_len)?;
20186            // Decode the envelope for each type.
20187            let mut _next_ordinal_to_read = 0;
20188            let mut next_offset = offset;
20189            let end_offset = offset + bytes_len;
20190            _next_ordinal_to_read += 1;
20191            if next_offset >= end_offset {
20192                return Ok(());
20193            }
20194
20195            // Decode unknown envelopes for gaps in ordinals.
20196            while _next_ordinal_to_read < 1 {
20197                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20198                _next_ordinal_to_read += 1;
20199                next_offset += envelope_size;
20200            }
20201
20202            let next_out_of_line = decoder.next_out_of_line();
20203            let handles_before = decoder.remaining_handles();
20204            if let Some((inlined, num_bytes, num_handles)) =
20205                fidl::encoding::decode_envelope_header(decoder, next_offset)?
20206            {
20207                let member_inline_size = <fidl::encoding::Vector<
20208                    fidl::encoding::Endpoint<
20209                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20210                    >,
20211                    64,
20212                > as fidl::encoding::TypeMarker>::inline_size(
20213                    decoder.context
20214                );
                // The inline bit must agree with the member's inline size.
20215                if inlined != (member_inline_size <= 4) {
20216                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
20217                }
20218                let inner_offset;
20219                let mut inner_depth = depth.clone();
20220                if inlined {
20221                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
20222                    inner_offset = next_offset;
20223                } else {
20224                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
20225                    inner_depth.increment()?;
20226                }
20227                let val_ref = self.tokens.get_or_insert_with(|| {
20228                    fidl::new_empty!(
20229                        fidl::encoding::Vector<
20230                            fidl::encoding::Endpoint<
20231                                fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20232                            >,
20233                            64,
20234                        >,
20235                        fidl::encoding::DefaultFuchsiaResourceDialect
20236                    )
20237                });
20238                fidl::decode!(
20239                    fidl::encoding::Vector<
20240                        fidl::encoding::Endpoint<
20241                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20242                        >,
20243                        64,
20244                    >,
20245                    fidl::encoding::DefaultFuchsiaResourceDialect,
20246                    val_ref,
20247                    decoder,
20248                    inner_offset,
20249                    inner_depth
20250                )?;
                // Cross-check declared envelope byte/handle counts against
                // what decoding actually consumed.
20251                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
20252                {
20253                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
20254                }
20255                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
20256                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
20257                }
20258            }
20259
20260            next_offset += envelope_size;
20261
20262            // Decode the remaining unknown envelopes.
20263            while next_offset < end_offset {
20264                _next_ordinal_to_read += 1;
20265                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20266                next_offset += envelope_size;
20267            }
20268
20269            Ok(())
20270        }
20271    }
20272
20273    impl BufferCollectionTokenDuplicateSyncResponse {
20274        #[inline(always)]
20275        fn max_ordinal_present(&self) -> u64 {
20276            if let Some(_) = self.tokens {
20277                return 1;
20278            }
20279            0
20280        }
20281    }
20282
    // Resource types are encoded via `&mut Self` so that handle-bearing fields
    // can be moved out of the value during encoding (generated by fidlgen).
20283    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateSyncResponse {
20284        type Borrowed<'a> = &'a mut Self;
        // Identity: the mutable reference itself is the "borrowed" form.
20285        fn take_or_borrow<'a>(
20286            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20287        ) -> Self::Borrowed<'a> {
20288            value
20289        }
20290    }
20291
    // Wire-layout constants for this table type (generated by fidlgen).
    // The inline form is the 16-byte header written by `encode` below
    // (max-ordinal u64 at +0, presence marker u64 at +8), aligned to 8.
20292    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateSyncResponse {
20293        type Owned = Self;
20294
20295        #[inline(always)]
20296        fn inline_align(_context: fidl::encoding::Context) -> usize {
20297            8
20298        }
20299
20300        #[inline(always)]
20301        fn inline_size(_context: fidl::encoding::Context) -> usize {
20302            16
20303        }
20304    }
20305
    // Table encode for BufferCollectionTokenDuplicateSyncResponse: writes the
    // 16-byte header, then one 8-byte envelope per present ordinal.
    // NOTE(review): machine-generated by fidlgen — regenerate rather than editing.
20306    unsafe impl
20307        fidl::encoding::Encode<
20308            BufferCollectionTokenDuplicateSyncResponse,
20309            fidl::encoding::DefaultFuchsiaResourceDialect,
20310        > for &mut BufferCollectionTokenDuplicateSyncResponse
20311    {
20312        unsafe fn encode(
20313            self,
20314            encoder: &mut fidl::encoding::Encoder<
20315                '_,
20316                fidl::encoding::DefaultFuchsiaResourceDialect,
20317            >,
20318            offset: usize,
20319            mut depth: fidl::encoding::Depth,
20320        ) -> fidl::Result<()> {
20321            encoder.debug_check_bounds::<BufferCollectionTokenDuplicateSyncResponse>(offset);
20322            // Vector header
20323            let max_ordinal: u64 = self.max_ordinal_present();
20324            encoder.write_num(max_ordinal, offset);
20325            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
20326            // Calling encoder.out_of_line_offset(0) is not allowed.
20327            if max_ordinal == 0 {
20328                return Ok(());
20329            }
20330            depth.increment()?;
20331            let envelope_size = 8;
20332            let bytes_len = max_ordinal as usize * envelope_size;
20333            #[allow(unused_variables)]
20334            let offset = encoder.out_of_line_offset(bytes_len);
20335            let mut _prev_end_offset: usize = 0;
            // Ordinal 1: tokens (vector of client-end handles, max 64 elements).
20336            if 1 > max_ordinal {
20337                return Ok(());
20338            }
20339
20340            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
20341            // are envelope_size bytes.
20342            let cur_offset: usize = (1 - 1) * envelope_size;
20343
20344            // Zero reserved fields.
20345            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
20346
20347            // Safety:
20348            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
20349            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
20350            //   envelope_size bytes, there is always sufficient room.
20351            fidl::encoding::encode_in_envelope_optional::<
20352                fidl::encoding::Vector<
20353                    fidl::encoding::Endpoint<
20354                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20355                    >,
20356                    64,
20357                >,
20358                fidl::encoding::DefaultFuchsiaResourceDialect,
20359            >(
                // take_or_borrow moves the handles out of `self` for encoding.
20360                self.tokens.as_mut().map(
20361                    <fidl::encoding::Vector<
20362                        fidl::encoding::Endpoint<
20363                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20364                        >,
20365                        64,
20366                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
20367                ),
20368                encoder,
20369                offset + cur_offset,
20370                depth,
20371            )?;
20372
20373            _prev_end_offset = cur_offset + envelope_size;
20374
20375            Ok(())
20376        }
20377    }
20378
    // Table decode for BufferCollectionTokenDuplicateSyncResponse.
    // NOTE(review): machine-generated by fidlgen — regenerate rather than editing.
20379    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
20380        for BufferCollectionTokenDuplicateSyncResponse
20381    {
20382        #[inline(always)]
20383        fn new_empty() -> Self {
20384            Self::default()
20385        }
20386
        // Decodes the table header, then the single known ordinal (1: tokens),
        // skipping unknown envelopes for forward compatibility.
20387        unsafe fn decode(
20388            &mut self,
20389            decoder: &mut fidl::encoding::Decoder<
20390                '_,
20391                fidl::encoding::DefaultFuchsiaResourceDialect,
20392            >,
20393            offset: usize,
20394            mut depth: fidl::encoding::Depth,
20395        ) -> fidl::Result<()> {
20396            decoder.debug_check_bounds::<Self>(offset);
20397            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
20398                None => return Err(fidl::Error::NotNullable),
20399                Some(len) => len,
20400            };
20401            // Calling decoder.out_of_line_offset(0) is not allowed.
20402            if len == 0 {
20403                return Ok(());
20404            };
20405            depth.increment()?;
20406            let envelope_size = 8;
20407            let bytes_len = len * envelope_size;
20408            let offset = decoder.out_of_line_offset(bytes_len)?;
20409            // Decode the envelope for each type.
20410            let mut _next_ordinal_to_read = 0;
20411            let mut next_offset = offset;
20412            let end_offset = offset + bytes_len;
20413            _next_ordinal_to_read += 1;
20414            if next_offset >= end_offset {
20415                return Ok(());
20416            }
20417
20418            // Decode unknown envelopes for gaps in ordinals.
20419            while _next_ordinal_to_read < 1 {
20420                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20421                _next_ordinal_to_read += 1;
20422                next_offset += envelope_size;
20423            }
20424
20425            let next_out_of_line = decoder.next_out_of_line();
20426            let handles_before = decoder.remaining_handles();
20427            if let Some((inlined, num_bytes, num_handles)) =
20428                fidl::encoding::decode_envelope_header(decoder, next_offset)?
20429            {
20430                let member_inline_size = <fidl::encoding::Vector<
20431                    fidl::encoding::Endpoint<
20432                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20433                    >,
20434                    64,
20435                > as fidl::encoding::TypeMarker>::inline_size(
20436                    decoder.context
20437                );
                // The inline bit must agree with the member's inline size.
20438                if inlined != (member_inline_size <= 4) {
20439                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
20440                }
20441                let inner_offset;
20442                let mut inner_depth = depth.clone();
20443                if inlined {
20444                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
20445                    inner_offset = next_offset;
20446                } else {
20447                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
20448                    inner_depth.increment()?;
20449                }
20450                let val_ref = self.tokens.get_or_insert_with(|| {
20451                    fidl::new_empty!(
20452                        fidl::encoding::Vector<
20453                            fidl::encoding::Endpoint<
20454                                fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20455                            >,
20456                            64,
20457                        >,
20458                        fidl::encoding::DefaultFuchsiaResourceDialect
20459                    )
20460                });
20461                fidl::decode!(
20462                    fidl::encoding::Vector<
20463                        fidl::encoding::Endpoint<
20464                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20465                        >,
20466                        64,
20467                    >,
20468                    fidl::encoding::DefaultFuchsiaResourceDialect,
20469                    val_ref,
20470                    decoder,
20471                    inner_offset,
20472                    inner_depth
20473                )?;
                // Cross-check declared envelope byte/handle counts against
                // what decoding actually consumed.
20474                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
20475                {
20476                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
20477                }
20478                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
20479                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
20480                }
20481            }
20482
20483            next_offset += envelope_size;
20484
20485            // Decode the remaining unknown envelopes.
20486            while next_offset < end_offset {
20487                _next_ordinal_to_read += 1;
20488                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20489                next_offset += envelope_size;
20490            }
20491
20492            Ok(())
20493        }
20494    }
20495
20496    impl BufferCollectionWaitForAllBuffersAllocatedResponse {
20497        #[inline(always)]
20498        fn max_ordinal_present(&self) -> u64 {
20499            if let Some(_) = self.buffer_collection_info {
20500                return 1;
20501            }
20502            0
20503        }
20504    }
20505
    // Resource types are encoded via `&mut Self` so that handle-bearing fields
    // can be moved out of the value during encoding (generated by fidlgen).
20506    impl fidl::encoding::ResourceTypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
20507        type Borrowed<'a> = &'a mut Self;
        // Identity: the mutable reference itself is the "borrowed" form.
20508        fn take_or_borrow<'a>(
20509            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20510        ) -> Self::Borrowed<'a> {
20511            value
20512        }
20513    }
20514
    // Wire-layout constants for this table type (generated by fidlgen).
    // The inline form is the 16-byte header written by `encode` below
    // (max-ordinal u64 at +0, presence marker u64 at +8), aligned to 8.
20515    unsafe impl fidl::encoding::TypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
20516        type Owned = Self;
20517
20518        #[inline(always)]
20519        fn inline_align(_context: fidl::encoding::Context) -> usize {
20520            8
20521        }
20522
20523        #[inline(always)]
20524        fn inline_size(_context: fidl::encoding::Context) -> usize {
20525            16
20526        }
20527    }
20528
    // Table encode for BufferCollectionWaitForAllBuffersAllocatedResponse:
    // writes the 16-byte header, then one 8-byte envelope per present ordinal.
    // NOTE(review): machine-generated by fidlgen — regenerate rather than editing.
20529    unsafe impl
20530        fidl::encoding::Encode<
20531            BufferCollectionWaitForAllBuffersAllocatedResponse,
20532            fidl::encoding::DefaultFuchsiaResourceDialect,
20533        > for &mut BufferCollectionWaitForAllBuffersAllocatedResponse
20534    {
20535        unsafe fn encode(
20536            self,
20537            encoder: &mut fidl::encoding::Encoder<
20538                '_,
20539                fidl::encoding::DefaultFuchsiaResourceDialect,
20540            >,
20541            offset: usize,
20542            mut depth: fidl::encoding::Depth,
20543        ) -> fidl::Result<()> {
20544            encoder
20545                .debug_check_bounds::<BufferCollectionWaitForAllBuffersAllocatedResponse>(offset);
20546            // Vector header
20547            let max_ordinal: u64 = self.max_ordinal_present();
20548            encoder.write_num(max_ordinal, offset);
20549            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
20550            // Calling encoder.out_of_line_offset(0) is not allowed.
20551            if max_ordinal == 0 {
20552                return Ok(());
20553            }
20554            depth.increment()?;
20555            let envelope_size = 8;
20556            let bytes_len = max_ordinal as usize * envelope_size;
20557            #[allow(unused_variables)]
20558            let offset = encoder.out_of_line_offset(bytes_len);
20559            let mut _prev_end_offset: usize = 0;
            // Ordinal 1: buffer_collection_info (resource table member).
20560            if 1 > max_ordinal {
20561                return Ok(());
20562            }
20563
20564            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
20565            // are envelope_size bytes.
20566            let cur_offset: usize = (1 - 1) * envelope_size;
20567
20568            // Zero reserved fields.
20569            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
20570
20571            // Safety:
20572            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
20573            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
20574            //   envelope_size bytes, there is always sufficient room.
20575            fidl::encoding::encode_in_envelope_optional::<
20576                BufferCollectionInfo,
20577                fidl::encoding::DefaultFuchsiaResourceDialect,
20578            >(
                // take_or_borrow moves resource contents out of `self` for encoding.
20579                self.buffer_collection_info.as_mut().map(
20580                    <BufferCollectionInfo as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
20581                ),
20582                encoder,
20583                offset + cur_offset,
20584                depth,
20585            )?;
20586
20587            _prev_end_offset = cur_offset + envelope_size;
20588
20589            Ok(())
20590        }
20591    }
20592
    // Wire-format decoder for BufferCollectionWaitForAllBuffersAllocatedResponse.
    // A FIDL table travels as a 16-byte vector header (max ordinal + presence word)
    // followed by one 8-byte envelope per ordinal; this impl walks those envelopes,
    // filling known fields and discarding unknown ones. Machine-generated logic —
    // only comments have been added here.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionWaitForAllBuffersAllocatedResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // Every table field is optional, so the empty table is the default struct.
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the number of envelopes, i.e. the highest ordinal present.
            // A table body is never absent, hence NotNullable on a null vector.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // --- ordinal 1: buffer_collection_info ---
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <BufferCollectionInfo as fidl::encoding::TypeMarker>::inline_size(
                        decoder.context,
                    );
                // Members of <= 4 bytes are stored inline in the envelope; the header's
                // inline bit must agree with the member's actual inline size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Decode in place into the field, creating an empty value first if unset.
                let val_ref = self.buffer_collection_info.get_or_insert_with(|| {
                    fidl::new_empty!(
                        BufferCollectionInfo,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    BufferCollectionInfo,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // The envelope header must account exactly for the bytes and handles
                // consumed while decoding the member.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
20695
20696    impl NodeAttachNodeTrackingRequest {
20697        #[inline(always)]
20698        fn max_ordinal_present(&self) -> u64 {
20699            if let Some(_) = self.server_end {
20700                return 1;
20701            }
20702            0
20703        }
20704    }
20705
20706    impl fidl::encoding::ResourceTypeMarker for NodeAttachNodeTrackingRequest {
20707        type Borrowed<'a> = &'a mut Self;
20708        fn take_or_borrow<'a>(
20709            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20710        ) -> Self::Borrowed<'a> {
20711            value
20712        }
20713    }
20714
20715    unsafe impl fidl::encoding::TypeMarker for NodeAttachNodeTrackingRequest {
20716        type Owned = Self;
20717
20718        #[inline(always)]
20719        fn inline_align(_context: fidl::encoding::Context) -> usize {
20720            8
20721        }
20722
20723        #[inline(always)]
20724        fn inline_size(_context: fidl::encoding::Context) -> usize {
20725            16
20726        }
20727    }
20728
    // Wire-format encoder for NodeAttachNodeTrackingRequest: writes the table's
    // vector header (max ordinal + ALLOC_PRESENT), then one 8-byte envelope per
    // ordinal out of line. Moves the `server_end` handle out of the table while
    // encoding. Machine-generated logic — only comments have been added here.
    unsafe impl
        fidl::encoding::Encode<
            NodeAttachNodeTrackingRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut NodeAttachNodeTrackingRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<NodeAttachNodeTrackingRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            // Ordinal 1: server_end. Nothing past max_ordinal is written.
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // NOTE(review): rights constant 2147483648 is 1 << 31 — presumably
            // ZX_RIGHT_SAME_RIGHTS; confirm against the zircon rights definitions.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.server_end.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::EventPair,
                        { fidl::ObjectType::EVENTPAIR.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
20799
    // Wire-format decoder for NodeAttachNodeTrackingRequest: walks the table's
    // envelopes, receiving the `server_end` eventpair handle at ordinal 1 and
    // discarding unknown ordinals. Machine-generated logic — comments only.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for NodeAttachNodeTrackingRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // Every table field is optional, so the empty table is the default struct.
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the number of envelopes (highest ordinal present); a table
            // body is never absent, hence NotNullable on a null vector.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // --- ordinal 1: server_end ---
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Members of <= 4 bytes are stored inline; the header's inline bit
                // must agree with the member's actual inline size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Decode in place into the field, creating an empty handle slot if unset.
                let val_ref =
                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // The envelope header must account exactly for the bytes and handles
                // consumed while decoding the member.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
20894
20895    impl NodeIsAlternateForRequest {
20896        #[inline(always)]
20897        fn max_ordinal_present(&self) -> u64 {
20898            if let Some(_) = self.node_ref {
20899                return 1;
20900            }
20901            0
20902        }
20903    }
20904
20905    impl fidl::encoding::ResourceTypeMarker for NodeIsAlternateForRequest {
20906        type Borrowed<'a> = &'a mut Self;
20907        fn take_or_borrow<'a>(
20908            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20909        ) -> Self::Borrowed<'a> {
20910            value
20911        }
20912    }
20913
20914    unsafe impl fidl::encoding::TypeMarker for NodeIsAlternateForRequest {
20915        type Owned = Self;
20916
20917        #[inline(always)]
20918        fn inline_align(_context: fidl::encoding::Context) -> usize {
20919            8
20920        }
20921
20922        #[inline(always)]
20923        fn inline_size(_context: fidl::encoding::Context) -> usize {
20924            16
20925        }
20926    }
20927
    // Wire-format encoder for NodeIsAlternateForRequest: writes the table's
    // vector header (max ordinal + ALLOC_PRESENT), then one 8-byte envelope per
    // ordinal out of line. Moves the `node_ref` event handle out of the table
    // while encoding. Machine-generated logic — only comments have been added.
    unsafe impl
        fidl::encoding::Encode<
            NodeIsAlternateForRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut NodeIsAlternateForRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<NodeIsAlternateForRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            // Ordinal 1: node_ref. Nothing past max_ordinal is written.
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // NOTE(review): rights constant 2147483648 is 1 << 31 — presumably
            // ZX_RIGHT_SAME_RIGHTS; confirm against the zircon rights definitions.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.node_ref.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Event,
                        { fidl::ObjectType::EVENT.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
20998
    // Wire-format decoder for NodeIsAlternateForRequest: walks the table's
    // envelopes, receiving the `node_ref` event handle at ordinal 1 and
    // discarding unknown ordinals. Machine-generated logic — comments only.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for NodeIsAlternateForRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // Every table field is optional, so the empty table is the default struct.
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the number of envelopes (highest ordinal present); a table
            // body is never absent, hence NotNullable on a null vector.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // --- ordinal 1: node_ref ---
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Members of <= 4 bytes are stored inline; the header's inline bit
                // must agree with the member's actual inline size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Decode in place into the field, creating an empty handle slot if unset.
                let val_ref =
                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // The envelope header must account exactly for the bytes and handles
                // consumed while decoding the member.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
21093
21094    impl NodeSetWeakOkRequest {
21095        #[inline(always)]
21096        fn max_ordinal_present(&self) -> u64 {
21097            if let Some(_) = self.for_child_nodes_also {
21098                return 1;
21099            }
21100            0
21101        }
21102    }
21103
21104    impl fidl::encoding::ResourceTypeMarker for NodeSetWeakOkRequest {
21105        type Borrowed<'a> = &'a mut Self;
21106        fn take_or_borrow<'a>(
21107            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
21108        ) -> Self::Borrowed<'a> {
21109            value
21110        }
21111    }
21112
21113    unsafe impl fidl::encoding::TypeMarker for NodeSetWeakOkRequest {
21114        type Owned = Self;
21115
21116        #[inline(always)]
21117        fn inline_align(_context: fidl::encoding::Context) -> usize {
21118            8
21119        }
21120
21121        #[inline(always)]
21122        fn inline_size(_context: fidl::encoding::Context) -> usize {
21123            16
21124        }
21125    }
21126
    // Wire-format encoder for NodeSetWeakOkRequest: writes the table's vector
    // header (max ordinal + ALLOC_PRESENT), then one 8-byte envelope per ordinal
    // out of line. The only member is the plain bool `for_child_nodes_also`, so
    // this borrows rather than moves. Machine-generated logic — comments only.
    unsafe impl
        fidl::encoding::Encode<NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect>
        for &mut NodeSetWeakOkRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<NodeSetWeakOkRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            // Ordinal 1: for_child_nodes_also. Nothing past max_ordinal is written.
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                bool,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.for_child_nodes_also
                    .as_ref()
                    .map(<bool as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
21187
    // Wire-format decoder for NodeSetWeakOkRequest: walks the table's envelopes,
    // reading the bool `for_child_nodes_also` at ordinal 1 and discarding
    // unknown ordinals. Machine-generated logic — only comments have been added.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for NodeSetWeakOkRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // Every table field is optional, so the empty table is the default struct.
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the number of envelopes (highest ordinal present); a table
            // body is never absent, hence NotNullable on a null vector.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // --- ordinal 1: for_child_nodes_also ---
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <bool as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                // Members of <= 4 bytes (bool is one of them) are stored inline; the
                // header's inline bit must agree with the member's actual size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Decode in place into the field, creating a default value if unset.
                let val_ref = self.for_child_nodes_also.get_or_insert_with(|| {
                    fidl::new_empty!(bool, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    bool,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // The envelope header must account exactly for the bytes and handles
                // consumed while decoding the member.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
21285
21286    impl NodeGetNodeRefResponse {
21287        #[inline(always)]
21288        fn max_ordinal_present(&self) -> u64 {
21289            if let Some(_) = self.node_ref {
21290                return 1;
21291            }
21292            0
21293        }
21294    }
21295
21296    impl fidl::encoding::ResourceTypeMarker for NodeGetNodeRefResponse {
21297        type Borrowed<'a> = &'a mut Self;
21298        fn take_or_borrow<'a>(
21299            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
21300        ) -> Self::Borrowed<'a> {
21301            value
21302        }
21303    }
21304
21305    unsafe impl fidl::encoding::TypeMarker for NodeGetNodeRefResponse {
21306        type Owned = Self;
21307
21308        #[inline(always)]
21309        fn inline_align(_context: fidl::encoding::Context) -> usize {
21310            8
21311        }
21312
21313        #[inline(always)]
21314        fn inline_size(_context: fidl::encoding::Context) -> usize {
21315            16
21316        }
21317    }
21318
    // Encodes the table as a 16-byte inline header (max present ordinal +
    // presence marker) followed by an out-of-line run of 8-byte envelopes,
    // one per ordinal up to the highest present field.
    unsafe impl
        fidl::encoding::Encode<
            NodeGetNodeRefResponse,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut NodeGetNodeRefResponse
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<NodeGetNodeRefResponse>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: node_ref (event handle). take_or_borrow moves the
            // handle out of `self`, transferring ownership to the encoder.
            // NOTE(review): 2147483648 (0x8000_0000) is the handle-rights
            // parameter — presumably "same rights"; confirm against the fidl
            // crate's HandleType docs.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.node_ref.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Event,
                        { fidl::ObjectType::EVENT.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
21389
    // Decodes the table leniently: envelopes whose ordinals this binding does
    // not know are skipped via decode_unknown_envelope (their bytes and
    // handles are consumed), so newer peers can add fields safely.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for NodeGetNodeRefResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // The table header is read like a vector header: element count
            // (max ordinal) plus presence marker; an absent table is invalid.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // Ordinal 1: node_ref (event handle).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // The envelope's inline bit must agree with the member size:
                // values of at most 4 bytes are stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // Verify the envelope's advertised byte/handle counts match
                // what decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
21484
21485    impl VmoBuffer {
21486        #[inline(always)]
21487        fn max_ordinal_present(&self) -> u64 {
21488            if let Some(_) = self.close_weak_asap {
21489                return 3;
21490            }
21491            if let Some(_) = self.vmo_usable_start {
21492                return 2;
21493            }
21494            if let Some(_) = self.vmo {
21495                return 1;
21496            }
21497            0
21498        }
21499    }
21500
    impl fidl::encoding::ResourceTypeMarker for VmoBuffer {
        // Resource types borrow mutably so that encoding can move handle
        // fields (vmo, close_weak_asap) out of the value.
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
21509
    unsafe impl fidl::encoding::TypeMarker for VmoBuffer {
        type Owned = Self;

        // Tables are 8-byte aligned on the wire.
        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        // Inline portion is the 16-byte table header (u64 max ordinal +
        // u64 presence marker); envelopes are stored out of line.
        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
21523
    // Encodes the table as a 16-byte inline header followed by an out-of-line
    // run of 8-byte envelopes. Only ordinals up to the highest present field
    // are written; the encoder bails out early once max_ordinal is passed.
    unsafe impl fidl::encoding::Encode<VmoBuffer, fidl::encoding::DefaultFuchsiaResourceDialect>
        for &mut VmoBuffer
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<VmoBuffer>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: vmo (VMO handle). take_or_borrow moves the handle
            // out of `self`, transferring ownership to the encoder.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.vmo.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Vmo,
                        { fidl::ObjectType::VMO.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 2: vmo_usable_start (u64). Plain value type, borrowed
            // rather than moved.
            fidl::encoding::encode_in_envelope_optional::<
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.vmo_usable_start
                    .as_ref()
                    .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 3 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (3 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 3: close_weak_asap (eventpair handle), moved out of
            // `self` like the vmo above.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.close_weak_asap.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::EventPair,
                        { fidl::ObjectType::EVENTPAIR.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
21655
    // Decodes the table leniently: envelopes whose ordinals this binding does
    // not know are skipped via decode_unknown_envelope (their bytes and
    // handles are consumed), so newer peers can add fields safely.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // The table header is read like a vector header: element count
            // (max ordinal) plus presence marker; an absent table is invalid.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // Ordinal 1: vmo (VMO handle).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // The envelope's inline bit must agree with the member size:
                // values of at most 4 bytes are stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // Verify the envelope's advertised byte/handle counts match
                // what decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 2: vmo_usable_start (u64).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.vmo_usable_start.get_or_insert_with(|| {
                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u64,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 3: close_weak_asap (eventpair handle).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 3 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
21849}