fidl_fuchsia_sysmem2/
fidl_fuchsia_sysmem2.rs

1// WARNING: This file is machine generated by fidlgen.
2
3#![warn(clippy::all)]
4#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]
5
6use bitflags::bitflags;
7use fidl::client::QueryResponseFut;
8use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
9use fidl::endpoints::{ControlHandle as _, Responder as _};
10pub use fidl_fuchsia_sysmem2_common::*;
11use futures::future::{self, MaybeDone, TryFutureExt};
12use zx_status;
13
14#[derive(Debug, Default, PartialEq)]
15pub struct AllocatorAllocateNonSharedCollectionRequest {
16    pub collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
17    #[doc(hidden)]
18    pub __source_breaking: fidl::marker::SourceBreaking,
19}
20
21impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
22    for AllocatorAllocateNonSharedCollectionRequest
23{
24}
25
26#[derive(Debug, Default, PartialEq)]
27pub struct AllocatorAllocateSharedCollectionRequest {
28    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
29    #[doc(hidden)]
30    pub __source_breaking: fidl::marker::SourceBreaking,
31}
32
33impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
34    for AllocatorAllocateSharedCollectionRequest
35{
36}
37
38#[derive(Debug, Default, PartialEq)]
39pub struct AllocatorBindSharedCollectionRequest {
40    pub token: Option<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
41    pub buffer_collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
42    #[doc(hidden)]
43    pub __source_breaking: fidl::marker::SourceBreaking,
44}
45
46impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
47    for AllocatorBindSharedCollectionRequest
48{
49}
50
51#[derive(Debug, Default, PartialEq)]
52pub struct AllocatorGetVmoInfoRequest {
53    /// `vmo` is required to be set; ownership is transferred to the server
54    /// so in most cases a client will duplicate a handle and transfer the
55    /// duplicate via this field.
56    pub vmo: Option<fidl::Vmo>,
57    #[doc(hidden)]
58    pub __source_breaking: fidl::marker::SourceBreaking,
59}
60
61impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
62    for AllocatorGetVmoInfoRequest
63{
64}
65
66#[derive(Debug, Default, PartialEq)]
67pub struct AllocatorGetVmoInfoResponse {
68    pub buffer_collection_id: Option<u64>,
69    pub buffer_index: Option<u64>,
70    pub close_weak_asap: Option<fidl::EventPair>,
71    #[doc(hidden)]
72    pub __source_breaking: fidl::marker::SourceBreaking,
73}
74
75impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
76    for AllocatorGetVmoInfoResponse
77{
78}
79
80#[derive(Debug, Default, PartialEq)]
81pub struct BufferCollectionAttachLifetimeTrackingRequest {
82    pub server_end: Option<fidl::EventPair>,
83    pub buffers_remaining: Option<u32>,
84    #[doc(hidden)]
85    pub __source_breaking: fidl::marker::SourceBreaking,
86}
87
88impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
89    for BufferCollectionAttachLifetimeTrackingRequest
90{
91}
92
93#[derive(Debug, Default, PartialEq)]
94pub struct BufferCollectionAttachTokenRequest {
95    pub rights_attenuation_mask: Option<fidl::Rights>,
96    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
97    #[doc(hidden)]
98    pub __source_breaking: fidl::marker::SourceBreaking,
99}
100
101impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
102    for BufferCollectionAttachTokenRequest
103{
104}
105
106/// Information about a buffer collection and its buffers.
107#[derive(Debug, Default, PartialEq)]
108pub struct BufferCollectionInfo {
109    /// These settings apply to all the buffers in the initial buffer
110    /// allocation.
111    ///
112    /// This field will always be set by sysmem.
113    pub settings: Option<SingleBufferSettings>,
114    /// VMO handles (and vmo_usable_start offset) for each buffer in the
115    /// collection.
116    ///
117    /// The size of this vector is the buffer_count (buffer_count is not sent
118    /// separately).
119    ///
120    /// All buffer VMO handles have identical size and access rights.  The size
121    /// is in settings.buffer_settings.size_bytes.
122    ///
123    /// The VMO access rights are determined based on the usages which the
124    /// client specified when allocating the buffer collection.  For example, a
125    /// client which expressed a read-only usage will receive VMOs without write
126    /// rights.  In addition, the rights can be attenuated by the parameter to
127    /// BufferCollectionToken.Duplicate() calls.
128    ///
129    /// This field will always have VmoBuffer(s) in it, even if the participant
130    /// specifies usage whieh does not require VMO handles.  This permits such a
131    /// participant to know the vmo_usable_start values, in case that's of any
132    /// use to the participant.
133    ///
134    /// This field will always be set by sysmem, even if the participant doesn't
135    /// specify any buffer usage (but the [`fuchsia.sysmem2/VmoBuffer.vmo`]
136    /// sub-field within this field won't be set in that case).
137    pub buffers: Option<Vec<VmoBuffer>>,
138    /// This number is unique among all logical buffer collections per boot.
139    ///
140    /// This ID number will be the same for all BufferCollectionToken(s),
141    /// BufferCollection(s), and BufferCollectionTokenGroup(s) associated with
142    /// the same logical buffer collection (derived from the same root token
143    /// created with fuchsia.sysmem2.Allocator.CreateSharedCollection, or with
144    /// CreateNonSharedCollection).
145    ///
146    /// The same ID can be retrieved from a BufferCollectionToken,
147    /// BufferCollection, or BufferCollectionTokenGroup using
148    /// GetBufferCollectionId (at the cost of a round-trip to sysmem and back).
149    ///
150    /// This field will always be set by sysmem.
151    pub buffer_collection_id: Option<u64>,
152    #[doc(hidden)]
153    pub __source_breaking: fidl::marker::SourceBreaking,
154}
155
156impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for BufferCollectionInfo {}
157
158#[derive(Debug, Default, PartialEq)]
159pub struct BufferCollectionSetConstraintsRequest {
160    pub constraints: Option<BufferCollectionConstraints>,
161    #[doc(hidden)]
162    pub __source_breaking: fidl::marker::SourceBreaking,
163}
164
165impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
166    for BufferCollectionSetConstraintsRequest
167{
168}
169
170#[derive(Debug, Default, PartialEq)]
171pub struct BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
172    pub group_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>>,
173    #[doc(hidden)]
174    pub __source_breaking: fidl::marker::SourceBreaking,
175}
176
177impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
178    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
179{
180}
181
182#[derive(Debug, Default, PartialEq)]
183pub struct BufferCollectionTokenDuplicateRequest {
184    pub rights_attenuation_mask: Option<fidl::Rights>,
185    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
186    #[doc(hidden)]
187    pub __source_breaking: fidl::marker::SourceBreaking,
188}
189
190impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
191    for BufferCollectionTokenDuplicateRequest
192{
193}
194
195#[derive(Debug, Default, PartialEq)]
196pub struct BufferCollectionTokenGroupCreateChildRequest {
197    /// Must be set.
198    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
199    /// If not set, the default is `ZX_RIGHT_SAME_RIGHTS`.
200    pub rights_attenuation_mask: Option<fidl::Rights>,
201    #[doc(hidden)]
202    pub __source_breaking: fidl::marker::SourceBreaking,
203}
204
205impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
206    for BufferCollectionTokenGroupCreateChildRequest
207{
208}
209
210#[derive(Debug, Default, PartialEq)]
211pub struct BufferCollectionTokenGroupCreateChildrenSyncResponse {
212    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
213    #[doc(hidden)]
214    pub __source_breaking: fidl::marker::SourceBreaking,
215}
216
217impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
218    for BufferCollectionTokenGroupCreateChildrenSyncResponse
219{
220}
221
222#[derive(Debug, Default, PartialEq)]
223pub struct BufferCollectionTokenDuplicateSyncResponse {
224    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
225    #[doc(hidden)]
226    pub __source_breaking: fidl::marker::SourceBreaking,
227}
228
229impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
230    for BufferCollectionTokenDuplicateSyncResponse
231{
232}
233
234#[derive(Debug, Default, PartialEq)]
235pub struct BufferCollectionWaitForAllBuffersAllocatedResponse {
236    pub buffer_collection_info: Option<BufferCollectionInfo>,
237    #[doc(hidden)]
238    pub __source_breaking: fidl::marker::SourceBreaking,
239}
240
241impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
242    for BufferCollectionWaitForAllBuffersAllocatedResponse
243{
244}
245
246#[derive(Debug, Default, PartialEq)]
247pub struct NodeAttachNodeTrackingRequest {
248    /// This field must be set. This evenpair end will be closed after the
249    /// `Node` is closed or failed and the node's buffer counts are no
250    /// longer in effect in the logical buffer collection.
251    pub server_end: Option<fidl::EventPair>,
252    #[doc(hidden)]
253    pub __source_breaking: fidl::marker::SourceBreaking,
254}
255
256impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
257    for NodeAttachNodeTrackingRequest
258{
259}
260
261#[derive(Debug, Default, PartialEq)]
262pub struct NodeIsAlternateForRequest {
263    pub node_ref: Option<fidl::Event>,
264    #[doc(hidden)]
265    pub __source_breaking: fidl::marker::SourceBreaking,
266}
267
268impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeIsAlternateForRequest {}
269
270#[derive(Debug, Default, PartialEq)]
271pub struct NodeSetWeakOkRequest {
272    pub for_child_nodes_also: Option<bool>,
273    #[doc(hidden)]
274    pub __source_breaking: fidl::marker::SourceBreaking,
275}
276
277impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeSetWeakOkRequest {}
278
279#[derive(Debug, Default, PartialEq)]
280pub struct NodeGetNodeRefResponse {
281    pub node_ref: Option<fidl::Event>,
282    #[doc(hidden)]
283    pub __source_breaking: fidl::marker::SourceBreaking,
284}
285
286impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeGetNodeRefResponse {}
287
288#[derive(Debug, Default, PartialEq)]
289pub struct VmoBuffer {
290    /// `vmo` can be un-set if a participant has only
291    /// [`fuchsia.sysmem2/BufferUsage.none`] set to `NONE_USAGE` (explicitly or
292    /// implicitly by [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
293    /// without `constraints` set).
294    pub vmo: Option<fidl::Vmo>,
295    /// Offset within the VMO of the first usable byte. Must be < the VMO's size
296    /// in bytes, and leave sufficient room for BufferMemorySettings.size_bytes
297    /// before the end of the VMO.
298    ///
299    /// Currently sysmem will always set this field to 0, and in future, sysmem
300    /// won't set this field to a non-zero value unless all participants have
301    /// explicitly indicated support for non-zero vmo_usable_start (this
302    /// mechanism does not exist as of this comment). A participant that hasn't
303    /// explicitly indicated support for non-zero vmo_usable_start (all current
304    /// clients) should implicitly assume this field is set to 0 without
305    /// actually checking this field.
306    pub vmo_usable_start: Option<u64>,
307    /// This field is set iff `vmo` is a sysmem weak VMO handle. The client must
308    /// keep `close_weak_asap` around for as long as `vmo`, and must notice
309    /// `ZX_EVENTPAIR_PEER_CLOSED`. If that signal occurs, the client must close
310    /// `vmo` asap. Not doing so is considered a VMO leak by the client and in
311    /// that case sysmem will eventually complain loudly via syslog (currently
312    /// 5s later).
313    pub close_weak_asap: Option<fidl::EventPair>,
314    #[doc(hidden)]
315    pub __source_breaking: fidl::marker::SourceBreaking,
316}
317
318impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {}
319
320#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
321pub struct AllocatorMarker;
322
323impl fidl::endpoints::ProtocolMarker for AllocatorMarker {
324    type Proxy = AllocatorProxy;
325    type RequestStream = AllocatorRequestStream;
326    #[cfg(target_os = "fuchsia")]
327    type SynchronousProxy = AllocatorSynchronousProxy;
328
329    const DEBUG_NAME: &'static str = "fuchsia.sysmem2.Allocator";
330}
331impl fidl::endpoints::DiscoverableProtocolMarker for AllocatorMarker {}
332pub type AllocatorGetVmoInfoResult = Result<AllocatorGetVmoInfoResponse, Error>;
333
334pub trait AllocatorProxyInterface: Send + Sync {
335    fn r#allocate_non_shared_collection(
336        &self,
337        payload: AllocatorAllocateNonSharedCollectionRequest,
338    ) -> Result<(), fidl::Error>;
339    fn r#allocate_shared_collection(
340        &self,
341        payload: AllocatorAllocateSharedCollectionRequest,
342    ) -> Result<(), fidl::Error>;
343    fn r#bind_shared_collection(
344        &self,
345        payload: AllocatorBindSharedCollectionRequest,
346    ) -> Result<(), fidl::Error>;
347    type ValidateBufferCollectionTokenResponseFut: std::future::Future<
348            Output = Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error>,
349        > + Send;
350    fn r#validate_buffer_collection_token(
351        &self,
352        payload: &AllocatorValidateBufferCollectionTokenRequest,
353    ) -> Self::ValidateBufferCollectionTokenResponseFut;
354    fn r#set_debug_client_info(
355        &self,
356        payload: &AllocatorSetDebugClientInfoRequest,
357    ) -> Result<(), fidl::Error>;
358    type GetVmoInfoResponseFut: std::future::Future<Output = Result<AllocatorGetVmoInfoResult, fidl::Error>>
359        + Send;
360    fn r#get_vmo_info(&self, payload: AllocatorGetVmoInfoRequest) -> Self::GetVmoInfoResponseFut;
361}
362#[derive(Debug)]
363#[cfg(target_os = "fuchsia")]
364pub struct AllocatorSynchronousProxy {
365    client: fidl::client::sync::Client,
366}
367
368#[cfg(target_os = "fuchsia")]
369impl fidl::endpoints::SynchronousProxy for AllocatorSynchronousProxy {
370    type Proxy = AllocatorProxy;
371    type Protocol = AllocatorMarker;
372
373    fn from_channel(inner: fidl::Channel) -> Self {
374        Self::new(inner)
375    }
376
377    fn into_channel(self) -> fidl::Channel {
378        self.client.into_channel()
379    }
380
381    fn as_channel(&self) -> &fidl::Channel {
382        self.client.as_channel()
383    }
384}
385
386#[cfg(target_os = "fuchsia")]
387impl AllocatorSynchronousProxy {
388    pub fn new(channel: fidl::Channel) -> Self {
389        let protocol_name = <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
390        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
391    }
392
393    pub fn into_channel(self) -> fidl::Channel {
394        self.client.into_channel()
395    }
396
397    /// Waits until an event arrives and returns it. It is safe for other
398    /// threads to make concurrent requests while waiting for an event.
399    pub fn wait_for_event(
400        &self,
401        deadline: zx::MonotonicInstant,
402    ) -> Result<AllocatorEvent, fidl::Error> {
403        AllocatorEvent::decode(self.client.wait_for_event(deadline)?)
404    }
405
406    /// Allocates a buffer collection on behalf of a single client (aka
407    /// initiator) who is also the only participant (from the point of view of
408    /// sysmem).
409    ///
410    /// This call exists mainly for temp/testing purposes.  This call skips the
411    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
412    /// allow another participant to specify its constraints.
413    ///
414    /// Real clients are encouraged to use
415    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
416    /// let relevant participants directly convey their own constraints to
417    /// sysmem by sending `BufferCollectionToken`s to those participants.
418    ///
419    /// + request `collection_request` The server end of the
420    ///   [`fuchsia.sysmem2/BufferCollection`].
421    pub fn r#allocate_non_shared_collection(
422        &self,
423        mut payload: AllocatorAllocateNonSharedCollectionRequest,
424    ) -> Result<(), fidl::Error> {
425        self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
426            &mut payload,
427            0x5ca681f025a80e44,
428            fidl::encoding::DynamicFlags::FLEXIBLE,
429        )
430    }
431
432    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
433    ///
434    /// The `BufferCollectionToken` can be "duplicated" for distribution to
435    /// participants by using
436    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
437    /// `BufferCollectionToken` can be converted into a
438    /// [`fuchsia.sysmem2.BufferCollection`] using
439    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
440    ///
441    /// Buffer constraints can be set via
442    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
443    ///
444    /// Success/failure to populate the buffer collection with buffers can be
445    /// determined from
446    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
447    ///
448    /// Closing the client end of a `BufferCollectionToken` or
449    /// `BufferCollection` (without `Release` first) will fail all client ends
450    /// in the same failure domain, which by default is all client ends of the
451    /// buffer collection. See
452    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
453    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
454    /// separate failure domains within a buffer collection.
455    pub fn r#allocate_shared_collection(
456        &self,
457        mut payload: AllocatorAllocateSharedCollectionRequest,
458    ) -> Result<(), fidl::Error> {
459        self.client.send::<AllocatorAllocateSharedCollectionRequest>(
460            &mut payload,
461            0x11a19ff51f0b49c1,
462            fidl::encoding::DynamicFlags::FLEXIBLE,
463        )
464    }
465
466    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
467    /// [`fuchsia.sysmem2/BufferCollection`].
468    ///
469    /// At the time of sending this message, the buffer collection hasn't yet
470    /// been populated with buffers - the participant must first also send
471    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
472    /// `BufferCollection` client end.
473    ///
474    /// All `BufferCollectionToken`(s) duplicated from a root
475    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
476    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
477    /// existing `BufferCollection` client ends must have sent `SetConstraints`
478    /// before the logical BufferCollection will be populated with buffers (or
479    /// will fail if the overall set of constraints can't be satisfied).
480    ///
481    /// + request `token` The client endpoint of a channel whose server end was
482    ///   sent to sysmem using
483    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
484    ///   end was sent to sysmem using
485    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`].  The token is
486    ///   being "turned in" in exchange for a
487    ///   [`fuchsia.sysmem2/BufferCollection`].
488    /// + request `buffer_collection_request` The server end of a
489    ///   [`fuchsia.sysmem2/BufferCollection`] channel.  The sender retains the
490    ///   client end. The `BufferCollection` channel is a single participant's
491    ///   connection to the logical buffer collection. Typically there will be
492    ///   other participants with their own `BufferCollection` channel to the
493    ///   logical buffer collection.
494    pub fn r#bind_shared_collection(
495        &self,
496        mut payload: AllocatorBindSharedCollectionRequest,
497    ) -> Result<(), fidl::Error> {
498        self.client.send::<AllocatorBindSharedCollectionRequest>(
499            &mut payload,
500            0x550916b0dc1d5b4e,
501            fidl::encoding::DynamicFlags::FLEXIBLE,
502        )
503    }
504
505    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
506    /// the sysmem server.
507    ///
508    /// With this call, the client can determine whether an incoming token is a
509    /// real sysmem token that is known to the sysmem server, without any risk
510    /// of getting stuck waiting forever on a potentially fake token to complete
511    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
512    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
513    /// FIDL message). In cases where the client trusts the source of the token
514    /// to provide a real token, this call is not typically needed outside of
515    /// debugging.
516    ///
517    /// If the validate fails sometimes but succeeds other times, the source of
518    /// the token may itself not be calling
519    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
520    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
521    /// token but before sending the token to the current client. It may be more
522    /// convenient for the source to use
523    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
524    /// token(s), since that call has the sync step built in. Or, the buffer
525    /// collection may be failing before this call is processed by the sysmem
526    /// server, as buffer collection failure cleans up sysmem's tracking of
527    /// associated tokens.
528    ///
529    /// This call has no effect on any token.
530    ///
531    /// + request `token_server_koid` The koid of the server end of a channel
532    ///   that might be a BufferCollectionToken channel.  This can be obtained
533    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
534    /// - response `is_known` true means sysmem knew of the token at the time
535    ///   sysmem processed the request, but doesn't guarantee that the token is
536    ///   still valid by the time the client receives the reply. What it does
537    ///   guarantee is that the token at least was a real token, so a two-way
538    ///   call to the token won't stall forever (will fail or succeed fairly
539    ///   quickly, not stall). This can already be known implicitly if the
540    ///   source of the token can be trusted to provide a real token. A false
541    ///   value means the token wasn't known to sysmem at the time sysmem
542    ///   processed this call, but the token may have previously been valid, or
543    ///   may yet become valid. Or if the sender of the token isn't trusted to
544    ///   provide a real token, the token may be fake. It's the responsibility
545    ///   of the sender to sync with sysmem to ensure that previously
546    ///   created/duplicated token(s) are known to sysmem, before sending the
547    ///   token(s) to other participants.
548    pub fn r#validate_buffer_collection_token(
549        &self,
550        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
551        ___deadline: zx::MonotonicInstant,
552    ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
553        let _response = self.client.send_query::<
554            AllocatorValidateBufferCollectionTokenRequest,
555            fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
556        >(
557            payload,
558            0x4c5ee91b02a7e68d,
559            fidl::encoding::DynamicFlags::FLEXIBLE,
560            ___deadline,
561        )?
562        .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
563        Ok(_response)
564    }
565
566    /// Set information about the current client that can be used by sysmem to
567    /// help diagnose leaking memory and allocation stalls waiting for a
568    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
569    ///
570    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
571    /// subsequently created by this this [`fuchsia.sysmem2/Allocator`]
572    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
573    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
574    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
575    /// these `BufferCollection`(s) have the same initial debug client info as
576    /// the token turned in to create the `BufferCollection`).
577    ///
578    /// This info can be subsequently overridden on a per-`Node` basis by
579    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
580    ///
581    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
582    /// `Allocator` is the most efficient way to ensure that all
583    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
584    /// set, and is also more efficient than separately sending the same debug
585    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
586    /// created [`fuchsia.sysmem2/Node`].
587    ///
588    /// + request `name` This can be an arbitrary string, but the current
589    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
590    /// + request `id` This can be an arbitrary id, but the current process ID
591    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
592    pub fn r#set_debug_client_info(
593        &self,
594        mut payload: &AllocatorSetDebugClientInfoRequest,
595    ) -> Result<(), fidl::Error> {
596        self.client.send::<AllocatorSetDebugClientInfoRequest>(
597            payload,
598            0x6f68f19a3f509c4d,
599            fidl::encoding::DynamicFlags::FLEXIBLE,
600        )
601    }
602
603    /// Given a handle to a sysmem-provided VMO, this returns additional info
604    /// about the corresponding sysmem logical buffer.
605    ///
606    /// Most callers will duplicate a VMO handle first and send the duplicate to
607    /// this call.
608    ///
609    /// If the client has created a child VMO of a sysmem-provided VMO, that
610    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
611    ///
612    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
613    /// - response `buffer_collection_id` The buffer collection ID, which is
614    ///   unique per logical buffer collection per boot.
615    /// - response `buffer_index` The buffer index of the buffer within the
616    ///   buffer collection. This is the same as the index of the buffer within
617    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
618    ///   is the same for all sysmem-delivered VMOs corresponding to the same
619    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
620    ///   only unique across buffers of a buffer collection. For a given buffer,
621    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
622    ///   per boot.
623    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
624    ///   the `close_weak_asap` field will be set in the response. This handle
625    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
626    ///   the buffer should be closed as soon as possible. This is signalled
627    ///   shortly after all strong sysmem VMOs to the buffer are closed
628    ///   (including any held indirectly via strong `BufferCollectionToken` or
629    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
630    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
631    ///   considered a VMO leak caused by the client still holding a weak sysmem
632    ///   VMO handle and results in loud complaints to the log by sysmem. The
633    ///   buffers of a collection can be freed independently of each other. The
634    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
635    ///   response arrives at the client. A client that isn't prepared to handle
636    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
637    ///   the buffer and fail any associated request.
638    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
639    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
640    ///   the VMO handle passed in to this call itself keeps the VMO's info
641    ///   alive for purposes of responding to this call. Because of this,
642    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
643    ///   handles to the VMO when calling; even if other handles are closed
644    ///   before the GetVmoInfo response arrives at the client).
645    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
646    ///   capable of being used with GetVmoInfo due to rights/capability
647    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
648    ///   topic [`ZX_INFO_HANDLE_BASIC`].
649    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
650    ///   unspecified reason. See the log for more info.
651    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
652    ///   wasn't set, or there was some other problem with the request field(s).
653    pub fn r#get_vmo_info(
654        &self,
655        mut payload: AllocatorGetVmoInfoRequest,
656        ___deadline: zx::MonotonicInstant,
657    ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
658        let _response = self.client.send_query::<
659            AllocatorGetVmoInfoRequest,
660            fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
661        >(
662            &mut payload,
663            0x21a881120aa0ddf9,
664            fidl::encoding::DynamicFlags::FLEXIBLE,
665            ___deadline,
666        )?
667        .into_result::<AllocatorMarker>("get_vmo_info")?;
668        Ok(_response.map(|x| x))
669    }
670}
671
672#[derive(Debug, Clone)]
673pub struct AllocatorProxy {
674    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
675}
676
677impl fidl::endpoints::Proxy for AllocatorProxy {
678    type Protocol = AllocatorMarker;
679
680    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
681        Self::new(inner)
682    }
683
684    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
685        self.client.into_channel().map_err(|client| Self { client })
686    }
687
688    fn as_channel(&self) -> &::fidl::AsyncChannel {
689        self.client.as_channel()
690    }
691}
692
693impl AllocatorProxy {
694    /// Create a new Proxy for fuchsia.sysmem2/Allocator.
695    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
696        let protocol_name = <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
697        Self { client: fidl::client::Client::new(channel, protocol_name) }
698    }
699
700    /// Get a Stream of events from the remote end of the protocol.
701    ///
702    /// # Panics
703    ///
704    /// Panics if the event stream was already taken.
705    pub fn take_event_stream(&self) -> AllocatorEventStream {
706        AllocatorEventStream { event_receiver: self.client.take_event_receiver() }
707    }
708
709    /// Allocates a buffer collection on behalf of a single client (aka
710    /// initiator) who is also the only participant (from the point of view of
711    /// sysmem).
712    ///
713    /// This call exists mainly for temp/testing purposes.  This call skips the
714    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
715    /// allow another participant to specify its constraints.
716    ///
717    /// Real clients are encouraged to use
718    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
719    /// let relevant participants directly convey their own constraints to
720    /// sysmem by sending `BufferCollectionToken`s to those participants.
721    ///
722    /// + request `collection_request` The server end of the
723    ///   [`fuchsia.sysmem2/BufferCollection`].
724    pub fn r#allocate_non_shared_collection(
725        &self,
726        mut payload: AllocatorAllocateNonSharedCollectionRequest,
727    ) -> Result<(), fidl::Error> {
728        AllocatorProxyInterface::r#allocate_non_shared_collection(self, payload)
729    }
730
731    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
732    ///
733    /// The `BufferCollectionToken` can be "duplicated" for distribution to
734    /// participants by using
735    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
736    /// `BufferCollectionToken` can be converted into a
737    /// [`fuchsia.sysmem2.BufferCollection`] using
738    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
739    ///
740    /// Buffer constraints can be set via
741    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
742    ///
743    /// Success/failure to populate the buffer collection with buffers can be
744    /// determined from
745    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
746    ///
747    /// Closing the client end of a `BufferCollectionToken` or
748    /// `BufferCollection` (without `Release` first) will fail all client ends
749    /// in the same failure domain, which by default is all client ends of the
750    /// buffer collection. See
751    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
752    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
753    /// separate failure domains within a buffer collection.
754    pub fn r#allocate_shared_collection(
755        &self,
756        mut payload: AllocatorAllocateSharedCollectionRequest,
757    ) -> Result<(), fidl::Error> {
758        AllocatorProxyInterface::r#allocate_shared_collection(self, payload)
759    }
760
761    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
762    /// [`fuchsia.sysmem2/BufferCollection`].
763    ///
764    /// At the time of sending this message, the buffer collection hasn't yet
765    /// been populated with buffers - the participant must first also send
766    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
767    /// `BufferCollection` client end.
768    ///
769    /// All `BufferCollectionToken`(s) duplicated from a root
770    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
771    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
772    /// existing `BufferCollection` client ends must have sent `SetConstraints`
773    /// before the logical BufferCollection will be populated with buffers (or
774    /// will fail if the overall set of constraints can't be satisfied).
775    ///
776    /// + request `token` The client endpoint of a channel whose server end was
777    ///   sent to sysmem using
778    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
779    ///   end was sent to sysmem using
780    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`].  The token is
781    ///   being "turned in" in exchange for a
782    ///   [`fuchsia.sysmem2/BufferCollection`].
783    /// + request `buffer_collection_request` The server end of a
784    ///   [`fuchsia.sysmem2/BufferCollection`] channel.  The sender retains the
785    ///   client end. The `BufferCollection` channel is a single participant's
786    ///   connection to the logical buffer collection. Typically there will be
787    ///   other participants with their own `BufferCollection` channel to the
788    ///   logical buffer collection.
789    pub fn r#bind_shared_collection(
790        &self,
791        mut payload: AllocatorBindSharedCollectionRequest,
792    ) -> Result<(), fidl::Error> {
793        AllocatorProxyInterface::r#bind_shared_collection(self, payload)
794    }
795
796    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
797    /// the sysmem server.
798    ///
799    /// With this call, the client can determine whether an incoming token is a
800    /// real sysmem token that is known to the sysmem server, without any risk
801    /// of getting stuck waiting forever on a potentially fake token to complete
802    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
803    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
804    /// FIDL message). In cases where the client trusts the source of the token
805    /// to provide a real token, this call is not typically needed outside of
806    /// debugging.
807    ///
808    /// If the validate fails sometimes but succeeds other times, the source of
809    /// the token may itself not be calling
810    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
811    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
812    /// token but before sending the token to the current client. It may be more
813    /// convenient for the source to use
814    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
815    /// token(s), since that call has the sync step built in. Or, the buffer
816    /// collection may be failing before this call is processed by the sysmem
817    /// server, as buffer collection failure cleans up sysmem's tracking of
818    /// associated tokens.
819    ///
820    /// This call has no effect on any token.
821    ///
822    /// + request `token_server_koid` The koid of the server end of a channel
823    ///   that might be a BufferCollectionToken channel.  This can be obtained
824    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
825    /// - response `is_known` true means sysmem knew of the token at the time
826    ///   sysmem processed the request, but doesn't guarantee that the token is
827    ///   still valid by the time the client receives the reply. What it does
828    ///   guarantee is that the token at least was a real token, so a two-way
829    ///   call to the token won't stall forever (will fail or succeed fairly
830    ///   quickly, not stall). This can already be known implicitly if the
831    ///   source of the token can be trusted to provide a real token. A false
832    ///   value means the token wasn't known to sysmem at the time sysmem
833    ///   processed this call, but the token may have previously been valid, or
834    ///   may yet become valid. Or if the sender of the token isn't trusted to
835    ///   provide a real token, the token may be fake. It's the responsibility
836    ///   of the sender to sync with sysmem to ensure that previously
837    ///   created/duplicated token(s) are known to sysmem, before sending the
838    ///   token(s) to other participants.
839    pub fn r#validate_buffer_collection_token(
840        &self,
841        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
842    ) -> fidl::client::QueryResponseFut<
843        AllocatorValidateBufferCollectionTokenResponse,
844        fidl::encoding::DefaultFuchsiaResourceDialect,
845    > {
846        AllocatorProxyInterface::r#validate_buffer_collection_token(self, payload)
847    }
848
849    /// Set information about the current client that can be used by sysmem to
850    /// help diagnose leaking memory and allocation stalls waiting for a
851    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
852    ///
853    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
854    /// subsequently created by this this [`fuchsia.sysmem2/Allocator`]
855    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
856    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
857    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
858    /// these `BufferCollection`(s) have the same initial debug client info as
859    /// the token turned in to create the `BufferCollection`).
860    ///
861    /// This info can be subsequently overridden on a per-`Node` basis by
862    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
863    ///
864    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
865    /// `Allocator` is the most efficient way to ensure that all
866    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
867    /// set, and is also more efficient than separately sending the same debug
868    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
869    /// created [`fuchsia.sysmem2/Node`].
870    ///
871    /// + request `name` This can be an arbitrary string, but the current
872    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
873    /// + request `id` This can be an arbitrary id, but the current process ID
874    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
875    pub fn r#set_debug_client_info(
876        &self,
877        mut payload: &AllocatorSetDebugClientInfoRequest,
878    ) -> Result<(), fidl::Error> {
879        AllocatorProxyInterface::r#set_debug_client_info(self, payload)
880    }
881
882    /// Given a handle to a sysmem-provided VMO, this returns additional info
883    /// about the corresponding sysmem logical buffer.
884    ///
885    /// Most callers will duplicate a VMO handle first and send the duplicate to
886    /// this call.
887    ///
888    /// If the client has created a child VMO of a sysmem-provided VMO, that
889    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
890    ///
891    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
892    /// - response `buffer_collection_id` The buffer collection ID, which is
893    ///   unique per logical buffer collection per boot.
894    /// - response `buffer_index` The buffer index of the buffer within the
895    ///   buffer collection. This is the same as the index of the buffer within
896    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
897    ///   is the same for all sysmem-delivered VMOs corresponding to the same
898    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
899    ///   only unique across buffers of a buffer collection. For a given buffer,
900    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
901    ///   per boot.
902    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
903    ///   the `close_weak_asap` field will be set in the response. This handle
904    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
905    ///   the buffer should be closed as soon as possible. This is signalled
906    ///   shortly after all strong sysmem VMOs to the buffer are closed
907    ///   (including any held indirectly via strong `BufferCollectionToken` or
908    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
909    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
910    ///   considered a VMO leak caused by the client still holding a weak sysmem
911    ///   VMO handle and results in loud complaints to the log by sysmem. The
912    ///   buffers of a collection can be freed independently of each other. The
913    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
914    ///   response arrives at the client. A client that isn't prepared to handle
915    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
916    ///   the buffer and fail any associated request.
917    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
918    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
919    ///   the VMO handle passed in to this call itself keeps the VMO's info
920    ///   alive for purposes of responding to this call. Because of this,
921    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
922    ///   handles to the VMO when calling; even if other handles are closed
923    ///   before the GetVmoInfo response arrives at the client).
924    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
925    ///   capable of being used with GetVmoInfo due to rights/capability
926    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
927    ///   topic [`ZX_INFO_HANDLE_BASIC`].
928    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
929    ///   unspecified reason. See the log for more info.
930    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
931    ///   wasn't set, or there was some other problem with the request field(s).
932    pub fn r#get_vmo_info(
933        &self,
934        mut payload: AllocatorGetVmoInfoRequest,
935    ) -> fidl::client::QueryResponseFut<
936        AllocatorGetVmoInfoResult,
937        fidl::encoding::DefaultFuchsiaResourceDialect,
938    > {
939        AllocatorProxyInterface::r#get_vmo_info(self, payload)
940    }
941}
942
943impl AllocatorProxyInterface for AllocatorProxy {
944    fn r#allocate_non_shared_collection(
945        &self,
946        mut payload: AllocatorAllocateNonSharedCollectionRequest,
947    ) -> Result<(), fidl::Error> {
948        self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
949            &mut payload,
950            0x5ca681f025a80e44,
951            fidl::encoding::DynamicFlags::FLEXIBLE,
952        )
953    }
954
955    fn r#allocate_shared_collection(
956        &self,
957        mut payload: AllocatorAllocateSharedCollectionRequest,
958    ) -> Result<(), fidl::Error> {
959        self.client.send::<AllocatorAllocateSharedCollectionRequest>(
960            &mut payload,
961            0x11a19ff51f0b49c1,
962            fidl::encoding::DynamicFlags::FLEXIBLE,
963        )
964    }
965
966    fn r#bind_shared_collection(
967        &self,
968        mut payload: AllocatorBindSharedCollectionRequest,
969    ) -> Result<(), fidl::Error> {
970        self.client.send::<AllocatorBindSharedCollectionRequest>(
971            &mut payload,
972            0x550916b0dc1d5b4e,
973            fidl::encoding::DynamicFlags::FLEXIBLE,
974        )
975    }
976
977    type ValidateBufferCollectionTokenResponseFut = fidl::client::QueryResponseFut<
978        AllocatorValidateBufferCollectionTokenResponse,
979        fidl::encoding::DefaultFuchsiaResourceDialect,
980    >;
981    fn r#validate_buffer_collection_token(
982        &self,
983        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
984    ) -> Self::ValidateBufferCollectionTokenResponseFut {
985        fn _decode(
986            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
987        ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
988            let _response = fidl::client::decode_transaction_body::<
989                fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
990                fidl::encoding::DefaultFuchsiaResourceDialect,
991                0x4c5ee91b02a7e68d,
992            >(_buf?)?
993            .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
994            Ok(_response)
995        }
996        self.client.send_query_and_decode::<
997            AllocatorValidateBufferCollectionTokenRequest,
998            AllocatorValidateBufferCollectionTokenResponse,
999        >(
1000            payload,
1001            0x4c5ee91b02a7e68d,
1002            fidl::encoding::DynamicFlags::FLEXIBLE,
1003            _decode,
1004        )
1005    }
1006
1007    fn r#set_debug_client_info(
1008        &self,
1009        mut payload: &AllocatorSetDebugClientInfoRequest,
1010    ) -> Result<(), fidl::Error> {
1011        self.client.send::<AllocatorSetDebugClientInfoRequest>(
1012            payload,
1013            0x6f68f19a3f509c4d,
1014            fidl::encoding::DynamicFlags::FLEXIBLE,
1015        )
1016    }
1017
1018    type GetVmoInfoResponseFut = fidl::client::QueryResponseFut<
1019        AllocatorGetVmoInfoResult,
1020        fidl::encoding::DefaultFuchsiaResourceDialect,
1021    >;
1022    fn r#get_vmo_info(
1023        &self,
1024        mut payload: AllocatorGetVmoInfoRequest,
1025    ) -> Self::GetVmoInfoResponseFut {
1026        fn _decode(
1027            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
1028        ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
1029            let _response = fidl::client::decode_transaction_body::<
1030                fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
1031                fidl::encoding::DefaultFuchsiaResourceDialect,
1032                0x21a881120aa0ddf9,
1033            >(_buf?)?
1034            .into_result::<AllocatorMarker>("get_vmo_info")?;
1035            Ok(_response.map(|x| x))
1036        }
1037        self.client.send_query_and_decode::<AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResult>(
1038            &mut payload,
1039            0x21a881120aa0ddf9,
1040            fidl::encoding::DynamicFlags::FLEXIBLE,
1041            _decode,
1042        )
1043    }
1044}
1045
1046pub struct AllocatorEventStream {
1047    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
1048}
1049
1050impl std::marker::Unpin for AllocatorEventStream {}
1051
1052impl futures::stream::FusedStream for AllocatorEventStream {
1053    fn is_terminated(&self) -> bool {
1054        self.event_receiver.is_terminated()
1055    }
1056}
1057
1058impl futures::Stream for AllocatorEventStream {
1059    type Item = Result<AllocatorEvent, fidl::Error>;
1060
1061    fn poll_next(
1062        mut self: std::pin::Pin<&mut Self>,
1063        cx: &mut std::task::Context<'_>,
1064    ) -> std::task::Poll<Option<Self::Item>> {
1065        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
1066            &mut self.event_receiver,
1067            cx
1068        )?) {
1069            Some(buf) => std::task::Poll::Ready(Some(AllocatorEvent::decode(buf))),
1070            None => std::task::Poll::Ready(None),
1071        }
1072    }
1073}
1074
1075#[derive(Debug)]
1076pub enum AllocatorEvent {
1077    #[non_exhaustive]
1078    _UnknownEvent {
1079        /// Ordinal of the event that was sent.
1080        ordinal: u64,
1081    },
1082}
1083
1084impl AllocatorEvent {
1085    /// Decodes a message buffer as a [`AllocatorEvent`].
1086    fn decode(
1087        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
1088    ) -> Result<AllocatorEvent, fidl::Error> {
1089        let (bytes, _handles) = buf.split_mut();
1090        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
1091        debug_assert_eq!(tx_header.tx_id, 0);
1092        match tx_header.ordinal {
1093            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
1094                Ok(AllocatorEvent::_UnknownEvent { ordinal: tx_header.ordinal })
1095            }
1096            _ => Err(fidl::Error::UnknownOrdinal {
1097                ordinal: tx_header.ordinal,
1098                protocol_name: <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
1099            }),
1100        }
1101    }
1102}
1103
1104/// A Stream of incoming requests for fuchsia.sysmem2/Allocator.
1105pub struct AllocatorRequestStream {
1106    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
1107    is_terminated: bool,
1108}
1109
1110impl std::marker::Unpin for AllocatorRequestStream {}
1111
1112impl futures::stream::FusedStream for AllocatorRequestStream {
1113    fn is_terminated(&self) -> bool {
1114        self.is_terminated
1115    }
1116}
1117
1118impl fidl::endpoints::RequestStream for AllocatorRequestStream {
1119    type Protocol = AllocatorMarker;
1120    type ControlHandle = AllocatorControlHandle;
1121
1122    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
1123        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
1124    }
1125
1126    fn control_handle(&self) -> Self::ControlHandle {
1127        AllocatorControlHandle { inner: self.inner.clone() }
1128    }
1129
1130    fn into_inner(
1131        self,
1132    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
1133    {
1134        (self.inner, self.is_terminated)
1135    }
1136
1137    fn from_inner(
1138        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
1139        is_terminated: bool,
1140    ) -> Self {
1141        Self { inner, is_terminated }
1142    }
1143}
1144
1145impl futures::Stream for AllocatorRequestStream {
1146    type Item = Result<AllocatorRequest, fidl::Error>;
1147
1148    fn poll_next(
1149        mut self: std::pin::Pin<&mut Self>,
1150        cx: &mut std::task::Context<'_>,
1151    ) -> std::task::Poll<Option<Self::Item>> {
1152        let this = &mut *self;
1153        if this.inner.check_shutdown(cx) {
1154            this.is_terminated = true;
1155            return std::task::Poll::Ready(None);
1156        }
1157        if this.is_terminated {
1158            panic!("polled AllocatorRequestStream after completion");
1159        }
1160        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
1161            |bytes, handles| {
1162                match this.inner.channel().read_etc(cx, bytes, handles) {
1163                    std::task::Poll::Ready(Ok(())) => {}
1164                    std::task::Poll::Pending => return std::task::Poll::Pending,
1165                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
1166                        this.is_terminated = true;
1167                        return std::task::Poll::Ready(None);
1168                    }
1169                    std::task::Poll::Ready(Err(e)) => {
1170                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
1171                            e.into(),
1172                        ))))
1173                    }
1174                }
1175
1176                // A message has been received from the channel
1177                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
1178
1179                std::task::Poll::Ready(Some(match header.ordinal {
1180                    0x5ca681f025a80e44 => {
1181                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
1182                        let mut req = fidl::new_empty!(
1183                            AllocatorAllocateNonSharedCollectionRequest,
1184                            fidl::encoding::DefaultFuchsiaResourceDialect
1185                        );
1186                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateNonSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
1187                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
1188                        Ok(AllocatorRequest::AllocateNonSharedCollection {
1189                            payload: req,
1190                            control_handle,
1191                        })
1192                    }
1193                    0x11a19ff51f0b49c1 => {
1194                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
1195                        let mut req = fidl::new_empty!(
1196                            AllocatorAllocateSharedCollectionRequest,
1197                            fidl::encoding::DefaultFuchsiaResourceDialect
1198                        );
1199                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
1200                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
1201                        Ok(AllocatorRequest::AllocateSharedCollection {
1202                            payload: req,
1203                            control_handle,
1204                        })
1205                    }
1206                    0x550916b0dc1d5b4e => {
1207                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
1208                        let mut req = fidl::new_empty!(
1209                            AllocatorBindSharedCollectionRequest,
1210                            fidl::encoding::DefaultFuchsiaResourceDialect
1211                        );
1212                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorBindSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
1213                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
1214                        Ok(AllocatorRequest::BindSharedCollection { payload: req, control_handle })
1215                    }
1216                    0x4c5ee91b02a7e68d => {
1217                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
1218                        let mut req = fidl::new_empty!(
1219                            AllocatorValidateBufferCollectionTokenRequest,
1220                            fidl::encoding::DefaultFuchsiaResourceDialect
1221                        );
1222                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorValidateBufferCollectionTokenRequest>(&header, _body_bytes, handles, &mut req)?;
1223                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
1224                        Ok(AllocatorRequest::ValidateBufferCollectionToken {
1225                            payload: req,
1226                            responder: AllocatorValidateBufferCollectionTokenResponder {
1227                                control_handle: std::mem::ManuallyDrop::new(control_handle),
1228                                tx_id: header.tx_id,
1229                            },
1230                        })
1231                    }
1232                    0x6f68f19a3f509c4d => {
1233                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
1234                        let mut req = fidl::new_empty!(
1235                            AllocatorSetDebugClientInfoRequest,
1236                            fidl::encoding::DefaultFuchsiaResourceDialect
1237                        );
1238                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
1239                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
1240                        Ok(AllocatorRequest::SetDebugClientInfo { payload: req, control_handle })
1241                    }
1242                    0x21a881120aa0ddf9 => {
1243                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
1244                        let mut req = fidl::new_empty!(
1245                            AllocatorGetVmoInfoRequest,
1246                            fidl::encoding::DefaultFuchsiaResourceDialect
1247                        );
1248                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorGetVmoInfoRequest>(&header, _body_bytes, handles, &mut req)?;
1249                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
1250                        Ok(AllocatorRequest::GetVmoInfo {
1251                            payload: req,
1252                            responder: AllocatorGetVmoInfoResponder {
1253                                control_handle: std::mem::ManuallyDrop::new(control_handle),
1254                                tx_id: header.tx_id,
1255                            },
1256                        })
1257                    }
1258                    _ if header.tx_id == 0
1259                        && header
1260                            .dynamic_flags()
1261                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
1262                    {
1263                        Ok(AllocatorRequest::_UnknownMethod {
1264                            ordinal: header.ordinal,
1265                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
1266                            method_type: fidl::MethodType::OneWay,
1267                        })
1268                    }
1269                    _ if header
1270                        .dynamic_flags()
1271                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
1272                    {
1273                        this.inner.send_framework_err(
1274                            fidl::encoding::FrameworkErr::UnknownMethod,
1275                            header.tx_id,
1276                            header.ordinal,
1277                            header.dynamic_flags(),
1278                            (bytes, handles),
1279                        )?;
1280                        Ok(AllocatorRequest::_UnknownMethod {
1281                            ordinal: header.ordinal,
1282                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
1283                            method_type: fidl::MethodType::TwoWay,
1284                        })
1285                    }
1286                    _ => Err(fidl::Error::UnknownOrdinal {
1287                        ordinal: header.ordinal,
1288                        protocol_name:
1289                            <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
1290                    }),
1291                }))
1292            },
1293        )
1294    }
1295}
1296
1297/// Allocates system memory buffers.
1298///
1299/// Epitaphs are not used in this protocol.
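///
/// A minimal server-side sketch (not part of the generated bindings): a
/// fake/test implementation of this protocol might drain an
/// [`AllocatorRequestStream`] and dispatch on `AllocatorRequest`. The loop
/// below only assumes `futures::TryStreamExt` and the types in this file;
/// real handling of each request is elided.
///
/// ```ignore
/// use futures::TryStreamExt;
///
/// async fn serve_allocator(mut stream: AllocatorRequestStream) -> Result<(), fidl::Error> {
///     while let Some(request) = stream.try_next().await? {
///         match request {
///             AllocatorRequest::GetVmoInfo { payload: _, responder } => {
///                 // Reply with an empty table, for illustration only.
///                 responder.send(Ok(AllocatorGetVmoInfoResponse::default()))?;
///             }
///             // One-way requests only carry a payload and a control handle.
///             AllocatorRequest::SetDebugClientInfo { payload: _, control_handle: _ } => {}
///             _ => {}
///         }
///     }
///     Ok(())
/// }
/// ```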
1300#[derive(Debug)]
1301pub enum AllocatorRequest {
1302    /// Allocates a buffer collection on behalf of a single client (aka
1303    /// initiator) who is also the only participant (from the point of view of
1304    /// sysmem).
1305    ///
1306    /// This call exists mainly for temp/testing purposes.  This call skips the
1307    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
1308    /// allow another participant to specify its constraints.
1309    ///
1310    /// Real clients are encouraged to use
1311    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
1312    /// let relevant participants directly convey their own constraints to
1313    /// sysmem by sending `BufferCollectionToken`s to those participants.
1314    ///
1315    /// + request `collection_request` The server end of the
1316    ///   [`fuchsia.sysmem2/BufferCollection`].
1317    AllocateNonSharedCollection {
1318        payload: AllocatorAllocateNonSharedCollectionRequest,
1319        control_handle: AllocatorControlHandle,
1320    },
1321    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
1322    ///
1323    /// The `BufferCollectionToken` can be "duplicated" for distribution to
1324    /// participants by using
1325    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
1326    /// `BufferCollectionToken` can be converted into a
1327    /// [`fuchsia.sysmem2/BufferCollection`] using
1328    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
1329    ///
1330    /// Buffer constraints can be set via
1331    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
1332    ///
1333    /// Success/failure to populate the buffer collection with buffers can be
1334    /// determined from
1335    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
1336    ///
1337    /// Closing the client end of a `BufferCollectionToken` or
1338    /// `BufferCollection` (without `Release` first) will fail all client ends
1339    /// in the same failure domain, which by default is all client ends of the
1340    /// buffer collection. See
1341    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
1342    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
1343    /// separate failure domains within a buffer collection.
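    ///
    /// A server-side sketch (not generated code): a fake allocator might
    /// simply take the provided server end and serve a
    /// `BufferCollectionToken` implementation on it. Only the request type
    /// defined in this file is assumed.
    ///
    /// ```ignore
    /// fn handle_allocate_shared_collection(payload: AllocatorAllocateSharedCollectionRequest) {
    ///     if let Some(token_server_end) = payload.token_request {
    ///         // Hand `token_server_end` to whatever serves
    ///         // fuchsia.sysmem2/BufferCollectionToken in this process.
    ///         let _ = token_server_end;
    ///     }
    /// }
    /// ```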
1344    AllocateSharedCollection {
1345        payload: AllocatorAllocateSharedCollectionRequest,
1346        control_handle: AllocatorControlHandle,
1347    },
1348    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
1349    /// [`fuchsia.sysmem2/BufferCollection`].
1350    ///
1351    /// At the time of sending this message, the buffer collection hasn't yet
1352    /// been populated with buffers - the participant must first also send
1353    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
1354    /// `BufferCollection` client end.
1355    ///
1356    /// All `BufferCollectionToken`(s) duplicated from a root
1357    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
1358    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
1359    /// existing `BufferCollection` client ends must have sent `SetConstraints`
1360    /// before the logical BufferCollection will be populated with buffers (or
1361    /// will fail if the overall set of constraints can't be satisfied).
1362    ///
1363    /// + request `token` The client endpoint of a channel whose server end was
1364    ///   sent to sysmem using
1365    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
1366    ///   end was sent to sysmem using
1367    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`].  The token is
1368    ///   being "turned in" in exchange for a
1369    ///   [`fuchsia.sysmem2/BufferCollection`].
1370    /// + request `buffer_collection_request` The server end of a
1371    ///   [`fuchsia.sysmem2/BufferCollection`] channel.  The sender retains the
1372    ///   client end. The `BufferCollection` channel is a single participant's
1373    ///   connection to the logical buffer collection. Typically there will be
1374    ///   other participants with their own `BufferCollection` channel to the
1375    ///   logical buffer collection.
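    ///
    /// A server-side sketch (not generated code) of unpacking this request:
    /// both fields are expected to be set by well-behaved clients, and the
    /// server exchanges the token for a `BufferCollection` connection served
    /// on `buffer_collection_request`.
    ///
    /// ```ignore
    /// fn handle_bind_shared_collection(payload: AllocatorBindSharedCollectionRequest) {
    ///     if let (Some(token), Some(collection_server_end)) =
    ///         (payload.token, payload.buffer_collection_request)
    ///     {
    ///         // Look up `token` among previously issued tokens, then serve
    ///         // fuchsia.sysmem2/BufferCollection on `collection_server_end`.
    ///         let _ = (token, collection_server_end);
    ///     }
    /// }
    /// ```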
1376    BindSharedCollection {
1377        payload: AllocatorBindSharedCollectionRequest,
1378        control_handle: AllocatorControlHandle,
1379    },
1380    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
1381    /// the sysmem server.
1382    ///
1383    /// With this call, the client can determine whether an incoming token is a
1384    /// real sysmem token that is known to the sysmem server, without any risk
1385    /// of getting stuck waiting forever on a potentially fake token to complete
1386    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
1387    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
1388    /// FIDL message). In cases where the client trusts the source of the token
1389    /// to provide a real token, this call is not typically needed outside of
1390    /// debugging.
1391    ///
1392    /// If validation fails sometimes but succeeds other times, the source of
1393    /// the token may itself not be calling
1394    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
1395    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
1396    /// token but before sending the token to the current client. It may be more
1397    /// convenient for the source to use
1398    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
1399    /// token(s), since that call has the sync step built in. Or, the buffer
1400    /// collection may be failing before this call is processed by the sysmem
1401    /// server, as buffer collection failure cleans up sysmem's tracking of
1402    /// associated tokens.
1403    ///
1404    /// This call has no effect on any token.
1405    ///
1406    /// + request `token_server_koid` The koid of the server end of a channel
1407    ///   that might be a BufferCollectionToken channel.  This can be obtained
1408    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
1409    /// - response `is_known` true means sysmem knew of the token at the time
1410    ///   sysmem processed the request, but doesn't guarantee that the token is
1411    ///   still valid by the time the client receives the reply. What it does
1412    ///   guarantee is that the token at least was a real token, so a two-way
1413    ///   call to the token won't stall forever (will fail or succeed fairly
1414    ///   quickly, not stall). This can already be known implicitly if the
1415    ///   source of the token can be trusted to provide a real token. A false
1416    ///   value means the token wasn't known to sysmem at the time sysmem
1417    ///   processed this call, but the token may have previously been valid, or
1418    ///   may yet become valid. Or if the sender of the token isn't trusted to
1419    ///   provide a real token, the token may be fake. It's the responsibility
1420    ///   of the sender to sync with sysmem to ensure that previously
1421    ///   created/duplicated token(s) are known to sysmem, before sending the
1422    ///   token(s) to other participants.
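    ///
    /// A server-side sketch (not generated code) of replying via the
    /// responder. The `is_known` field name follows the response
    /// documentation above and is assumed to be an optional table field.
    ///
    /// ```ignore
    /// fn reply_is_known(
    ///     responder: AllocatorValidateBufferCollectionTokenResponder,
    ///     known: bool,
    /// ) -> Result<(), fidl::Error> {
    ///     responder.send(&AllocatorValidateBufferCollectionTokenResponse {
    ///         is_known: Some(known),
    ///         ..Default::default()
    ///     })
    /// }
    /// ```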
1423    ValidateBufferCollectionToken {
1424        payload: AllocatorValidateBufferCollectionTokenRequest,
1425        responder: AllocatorValidateBufferCollectionTokenResponder,
1426    },
1427    /// Set information about the current client that can be used by sysmem to
1428    /// help diagnose leaking memory and allocation stalls waiting for a
1429    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
1430    ///
1431    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
1432    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
1433    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
1434    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
1435    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
1436    /// these `BufferCollection`(s) have the same initial debug client info as
1437    /// the token turned in to create the `BufferCollection`).
1438    ///
1439    /// This info can be subsequently overridden on a per-`Node` basis by
1440    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
1441    ///
1442    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
1443    /// `Allocator` is the most efficient way to ensure that all
1444    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
1445    /// set, and is also more efficient than separately sending the same debug
1446    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
1447    /// created [`fuchsia.sysmem2/Node`].
1448    ///
1449    /// + request `name` This can be an arbitrary string, but the current
1450    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
1451    /// + request `id` This can be an arbitrary id, but the current process ID
1452    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
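    ///
    /// A server-side sketch (not generated code): the `name`/`id` field
    /// names are assumed from the request documentation above.
    ///
    /// ```ignore
    /// fn handle_set_debug_client_info(payload: AllocatorSetDebugClientInfoRequest) {
    ///     let name = payload.name.unwrap_or_default();
    ///     let id = payload.id.unwrap_or_default();
    ///     println!("sysmem client: {name} (id {id})");
    /// }
    /// ```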
1453    SetDebugClientInfo {
1454        payload: AllocatorSetDebugClientInfoRequest,
1455        control_handle: AllocatorControlHandle,
1456    },
1457    /// Given a handle to a sysmem-provided VMO, this returns additional info
1458    /// about the corresponding sysmem logical buffer.
1459    ///
1460    /// Most callers will duplicate a VMO handle first and send the duplicate to
1461    /// this call.
1462    ///
1463    /// If the client has created a child VMO of a sysmem-provided VMO, that
1464    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
1465    ///
1466    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
1467    /// - response `buffer_collection_id` The buffer collection ID, which is
1468    ///   unique per logical buffer collection per boot.
1469    /// - response `buffer_index` The buffer index of the buffer within the
1470    ///   buffer collection. This is the same as the index of the buffer within
1471    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
1472    ///   is the same for all sysmem-delivered VMOs corresponding to the same
1473    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
1474    ///   only unique across buffers of a buffer collection. For a given buffer,
1475    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
1476    ///   per boot.
1477    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
1478    ///   the `close_weak_asap` field will be set in the response. This handle
1479    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
1480    ///   the buffer should be closed as soon as possible. This is signalled
1481    ///   shortly after all strong sysmem VMOs to the buffer are closed
1482    ///   (including any held indirectly via strong `BufferCollectionToken` or
1483    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
1484    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
1485    ///   considered a VMO leak caused by the client still holding a weak sysmem
1486    ///   VMO handle and results in loud complaints to the log by sysmem. The
1487    ///   buffers of a collection can be freed independently of each other. The
1488    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
1489    ///   response arrives at the client. A client that isn't prepared to handle
1490    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
1491    ///   the buffer and fail any associated request.
1492    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
1493    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
1494    ///   the VMO handle passed in to this call itself keeps the VMO's info
1495    ///   alive for purposes of responding to this call. Because of this,
1496    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
1497    ///   handles to the VMO when calling; even if other handles are closed
1498    ///   before the GetVmoInfo response arrives at the client).
1499    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
1500    ///   capable of being used with GetVmoInfo due to rights/capability
1501    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
1502    ///   topic [`ZX_INFO_HANDLE_BASIC`].
1503    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
1504    ///   unspecified reason. See the log for more info.
1505    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
1506    ///   wasn't set, or there was some other problem with the request field(s).
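    ///
    /// A server-side sketch (not generated code) of replying with the table
    /// defined in this file; `close_weak_asap` is only populated for weak
    /// VMOs, so it is left unset here.
    ///
    /// ```ignore
    /// fn reply_vmo_info(
    ///     responder: AllocatorGetVmoInfoResponder,
    ///     buffer_collection_id: u64,
    ///     buffer_index: u64,
    /// ) -> Result<(), fidl::Error> {
    ///     responder.send(Ok(AllocatorGetVmoInfoResponse {
    ///         buffer_collection_id: Some(buffer_collection_id),
    ///         buffer_index: Some(buffer_index),
    ///         ..Default::default()
    ///     }))
    /// }
    /// ```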
1507    GetVmoInfo { payload: AllocatorGetVmoInfoRequest, responder: AllocatorGetVmoInfoResponder },
1508    /// An interaction was received which does not match any known method.
1509    #[non_exhaustive]
1510    _UnknownMethod {
1511        /// Ordinal of the method that was called.
1512        ordinal: u64,
1513        control_handle: AllocatorControlHandle,
1514        method_type: fidl::MethodType,
1515    },
1516}
1517
1518impl AllocatorRequest {
1519    #[allow(irrefutable_let_patterns)]
1520    pub fn into_allocate_non_shared_collection(
1521        self,
1522    ) -> Option<(AllocatorAllocateNonSharedCollectionRequest, AllocatorControlHandle)> {
1523        if let AllocatorRequest::AllocateNonSharedCollection { payload, control_handle } = self {
1524            Some((payload, control_handle))
1525        } else {
1526            None
1527        }
1528    }
1529
1530    #[allow(irrefutable_let_patterns)]
1531    pub fn into_allocate_shared_collection(
1532        self,
1533    ) -> Option<(AllocatorAllocateSharedCollectionRequest, AllocatorControlHandle)> {
1534        if let AllocatorRequest::AllocateSharedCollection { payload, control_handle } = self {
1535            Some((payload, control_handle))
1536        } else {
1537            None
1538        }
1539    }
1540
1541    #[allow(irrefutable_let_patterns)]
1542    pub fn into_bind_shared_collection(
1543        self,
1544    ) -> Option<(AllocatorBindSharedCollectionRequest, AllocatorControlHandle)> {
1545        if let AllocatorRequest::BindSharedCollection { payload, control_handle } = self {
1546            Some((payload, control_handle))
1547        } else {
1548            None
1549        }
1550    }
1551
1552    #[allow(irrefutable_let_patterns)]
1553    pub fn into_validate_buffer_collection_token(
1554        self,
1555    ) -> Option<(
1556        AllocatorValidateBufferCollectionTokenRequest,
1557        AllocatorValidateBufferCollectionTokenResponder,
1558    )> {
1559        if let AllocatorRequest::ValidateBufferCollectionToken { payload, responder } = self {
1560            Some((payload, responder))
1561        } else {
1562            None
1563        }
1564    }
1565
1566    #[allow(irrefutable_let_patterns)]
1567    pub fn into_set_debug_client_info(
1568        self,
1569    ) -> Option<(AllocatorSetDebugClientInfoRequest, AllocatorControlHandle)> {
1570        if let AllocatorRequest::SetDebugClientInfo { payload, control_handle } = self {
1571            Some((payload, control_handle))
1572        } else {
1573            None
1574        }
1575    }
1576
1577    #[allow(irrefutable_let_patterns)]
1578    pub fn into_get_vmo_info(
1579        self,
1580    ) -> Option<(AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResponder)> {
1581        if let AllocatorRequest::GetVmoInfo { payload, responder } = self {
1582            Some((payload, responder))
1583        } else {
1584            None
1585        }
1586    }
1587
1588    /// Name of the method defined in FIDL
1589    pub fn method_name(&self) -> &'static str {
1590        match *self {
1591            AllocatorRequest::AllocateNonSharedCollection { .. } => {
1592                "allocate_non_shared_collection"
1593            }
1594            AllocatorRequest::AllocateSharedCollection { .. } => "allocate_shared_collection",
1595            AllocatorRequest::BindSharedCollection { .. } => "bind_shared_collection",
1596            AllocatorRequest::ValidateBufferCollectionToken { .. } => {
1597                "validate_buffer_collection_token"
1598            }
1599            AllocatorRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
1600            AllocatorRequest::GetVmoInfo { .. } => "get_vmo_info",
1601            AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
1602                "unknown one-way method"
1603            }
1604            AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
1605                "unknown two-way method"
1606            }
1607        }
1608    }
1609}
1610
1611#[derive(Debug, Clone)]
1612pub struct AllocatorControlHandle {
1613    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
1614}
1615
1616impl fidl::endpoints::ControlHandle for AllocatorControlHandle {
1617    fn shutdown(&self) {
1618        self.inner.shutdown()
1619    }
1620    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
1621        self.inner.shutdown_with_epitaph(status)
1622    }
1623
1624    fn is_closed(&self) -> bool {
1625        self.inner.channel().is_closed()
1626    }
1627    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
1628        self.inner.channel().on_closed()
1629    }
1630
1631    #[cfg(target_os = "fuchsia")]
1632    fn signal_peer(
1633        &self,
1634        clear_mask: zx::Signals,
1635        set_mask: zx::Signals,
1636    ) -> Result<(), zx_status::Status> {
1637        use fidl::Peered;
1638        self.inner.channel().signal_peer(clear_mask, set_mask)
1639    }
1640}
1641
1642impl AllocatorControlHandle {}
1643
1644#[must_use = "FIDL methods require a response to be sent"]
1645#[derive(Debug)]
1646pub struct AllocatorValidateBufferCollectionTokenResponder {
1647    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
1648    tx_id: u32,
1649}
1650
1651/// Sets the channel to be shut down (see [`AllocatorControlHandle::shutdown`])
1652/// if the responder is dropped without sending a response, so that the client
1653/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
1654impl std::ops::Drop for AllocatorValidateBufferCollectionTokenResponder {
1655    fn drop(&mut self) {
1656        self.control_handle.shutdown();
1657        // Safety: drops once, never accessed again
1658        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
1659    }
1660}
1661
1662impl fidl::endpoints::Responder for AllocatorValidateBufferCollectionTokenResponder {
1663    type ControlHandle = AllocatorControlHandle;
1664
1665    fn control_handle(&self) -> &AllocatorControlHandle {
1666        &self.control_handle
1667    }
1668
1669    fn drop_without_shutdown(mut self) {
1670        // Safety: drops once, never accessed again due to mem::forget
1671        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
1672        // Prevent Drop from running (which would shut down the channel)
1673        std::mem::forget(self);
1674    }
1675}
1676
1677impl AllocatorValidateBufferCollectionTokenResponder {
1678    /// Sends a response to the FIDL transaction.
1679    ///
1680    /// Sets the channel to shutdown if an error occurs.
1681    pub fn send(
1682        self,
1683        mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1684    ) -> Result<(), fidl::Error> {
1685        let _result = self.send_raw(payload);
1686        if _result.is_err() {
1687            self.control_handle.shutdown();
1688        }
1689        self.drop_without_shutdown();
1690        _result
1691    }
1692
1693    /// Similar to "send" but does not shutdown the channel if an error occurs.
1694    pub fn send_no_shutdown_on_err(
1695        self,
1696        mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1697    ) -> Result<(), fidl::Error> {
1698        let _result = self.send_raw(payload);
1699        self.drop_without_shutdown();
1700        _result
1701    }
1702
1703    fn send_raw(
1704        &self,
1705        mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1706    ) -> Result<(), fidl::Error> {
1707        self.control_handle.inner.send::<fidl::encoding::FlexibleType<
1708            AllocatorValidateBufferCollectionTokenResponse,
1709        >>(
1710            fidl::encoding::Flexible::new(payload),
1711            self.tx_id,
1712            0x4c5ee91b02a7e68d,
1713            fidl::encoding::DynamicFlags::FLEXIBLE,
1714        )
1715    }
1716}
1717
1718#[must_use = "FIDL methods require a response to be sent"]
1719#[derive(Debug)]
1720pub struct AllocatorGetVmoInfoResponder {
1721    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
1722    tx_id: u32,
1723}
1724
1725/// Sets the channel to be shut down (see [`AllocatorControlHandle::shutdown`])
1726/// if the responder is dropped without sending a response, so that the client
1727/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
1728impl std::ops::Drop for AllocatorGetVmoInfoResponder {
1729    fn drop(&mut self) {
1730        self.control_handle.shutdown();
1731        // Safety: drops once, never accessed again
1732        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
1733    }
1734}
1735
1736impl fidl::endpoints::Responder for AllocatorGetVmoInfoResponder {
1737    type ControlHandle = AllocatorControlHandle;
1738
1739    fn control_handle(&self) -> &AllocatorControlHandle {
1740        &self.control_handle
1741    }
1742
1743    fn drop_without_shutdown(mut self) {
1744        // Safety: drops once, never accessed again due to mem::forget
1745        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
1746        // Prevent Drop from running (which would shut down the channel)
1747        std::mem::forget(self);
1748    }
1749}
1750
1751impl AllocatorGetVmoInfoResponder {
1752    /// Sends a response to the FIDL transaction.
1753    ///
1754    /// Sets the channel to shutdown if an error occurs.
1755    pub fn send(
1756        self,
1757        mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1758    ) -> Result<(), fidl::Error> {
1759        let _result = self.send_raw(result);
1760        if _result.is_err() {
1761            self.control_handle.shutdown();
1762        }
1763        self.drop_without_shutdown();
1764        _result
1765    }
1766
1767    /// Similar to "send" but does not shutdown the channel if an error occurs.
1768    pub fn send_no_shutdown_on_err(
1769        self,
1770        mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1771    ) -> Result<(), fidl::Error> {
1772        let _result = self.send_raw(result);
1773        self.drop_without_shutdown();
1774        _result
1775    }
1776
1777    fn send_raw(
1778        &self,
1779        mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1780    ) -> Result<(), fidl::Error> {
1781        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
1782            AllocatorGetVmoInfoResponse,
1783            Error,
1784        >>(
1785            fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
1786            self.tx_id,
1787            0x21a881120aa0ddf9,
1788            fidl::encoding::DynamicFlags::FLEXIBLE,
1789        )
1790    }
1791}
1792
1793#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
1794pub struct BufferCollectionMarker;
1795
1796impl fidl::endpoints::ProtocolMarker for BufferCollectionMarker {
1797    type Proxy = BufferCollectionProxy;
1798    type RequestStream = BufferCollectionRequestStream;
1799    #[cfg(target_os = "fuchsia")]
1800    type SynchronousProxy = BufferCollectionSynchronousProxy;
1801
1802    const DEBUG_NAME: &'static str = "(anonymous) BufferCollection";
1803}
1804pub type BufferCollectionWaitForAllBuffersAllocatedResult =
1805    Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>;
1806pub type BufferCollectionCheckAllBuffersAllocatedResult = Result<(), Error>;
1807
1808pub trait BufferCollectionProxyInterface: Send + Sync {
1809    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
1810    fn r#sync(&self) -> Self::SyncResponseFut;
1811    fn r#release(&self) -> Result<(), fidl::Error>;
1812    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
1813    fn r#set_debug_client_info(
1814        &self,
1815        payload: &NodeSetDebugClientInfoRequest,
1816    ) -> Result<(), fidl::Error>;
1817    fn r#set_debug_timeout_log_deadline(
1818        &self,
1819        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
1820    ) -> Result<(), fidl::Error>;
1821    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
1822    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
1823        + Send;
1824    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
1825    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
1826        + Send;
1827    fn r#is_alternate_for(
1828        &self,
1829        payload: NodeIsAlternateForRequest,
1830    ) -> Self::IsAlternateForResponseFut;
1831    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
1832        + Send;
1833    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
1834    fn r#set_weak(&self) -> Result<(), fidl::Error>;
1835    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
1836    fn r#attach_node_tracking(
1837        &self,
1838        payload: NodeAttachNodeTrackingRequest,
1839    ) -> Result<(), fidl::Error>;
1840    fn r#set_constraints(
1841        &self,
1842        payload: BufferCollectionSetConstraintsRequest,
1843    ) -> Result<(), fidl::Error>;
1844    type WaitForAllBuffersAllocatedResponseFut: std::future::Future<
1845            Output = Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error>,
1846        > + Send;
1847    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut;
1848    type CheckAllBuffersAllocatedResponseFut: std::future::Future<
1849            Output = Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error>,
1850        > + Send;
1851    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut;
1852    fn r#attach_token(
1853        &self,
1854        payload: BufferCollectionAttachTokenRequest,
1855    ) -> Result<(), fidl::Error>;
1856    fn r#attach_lifetime_tracking(
1857        &self,
1858        payload: BufferCollectionAttachLifetimeTrackingRequest,
1859    ) -> Result<(), fidl::Error>;
1860}
1861#[derive(Debug)]
1862#[cfg(target_os = "fuchsia")]
1863pub struct BufferCollectionSynchronousProxy {
1864    client: fidl::client::sync::Client,
1865}
1866
1867#[cfg(target_os = "fuchsia")]
1868impl fidl::endpoints::SynchronousProxy for BufferCollectionSynchronousProxy {
1869    type Proxy = BufferCollectionProxy;
1870    type Protocol = BufferCollectionMarker;
1871
1872    fn from_channel(inner: fidl::Channel) -> Self {
1873        Self::new(inner)
1874    }
1875
1876    fn into_channel(self) -> fidl::Channel {
1877        self.client.into_channel()
1878    }
1879
1880    fn as_channel(&self) -> &fidl::Channel {
1881        self.client.as_channel()
1882    }
1883}
1884
1885#[cfg(target_os = "fuchsia")]
1886impl BufferCollectionSynchronousProxy {
1887    pub fn new(channel: fidl::Channel) -> Self {
1888        let protocol_name = <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
1889        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
1890    }
1891
1892    pub fn into_channel(self) -> fidl::Channel {
1893        self.client.into_channel()
1894    }
1895
1896    /// Waits until an event arrives and returns it. It is safe for other
1897    /// threads to make concurrent requests while waiting for an event.
1898    pub fn wait_for_event(
1899        &self,
1900        deadline: zx::MonotonicInstant,
1901    ) -> Result<BufferCollectionEvent, fidl::Error> {
1902        BufferCollectionEvent::decode(self.client.wait_for_event(deadline)?)
1903    }
1904
1905    /// Ensure that previous messages have been received server side. This is
1906    /// particularly useful after previous messages that created new tokens,
1907    /// because a token must be known to the sysmem server before sending the
1908    /// token to another participant.
1909    ///
1910    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
1911    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
1912    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
1913    /// to mitigate the possibility of a hostile/fake
1914    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
1915    /// Another way is to pass the token to
1916    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
1917    /// the token as part of exchanging it for a
1918    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
1919    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
1920    /// of stalling.
1921    ///
1922    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
1923    /// and then starting and completing a `Sync`, it's then safe to send the
1924    /// `BufferCollectionToken` client ends to other participants knowing the
1925    /// server will recognize the tokens when they're sent by the other
1926    /// participants to sysmem in a
1927    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
1928    /// efficient way to create tokens while avoiding unnecessary round trips.
1929    ///
1930    /// Other options include waiting for each
1931    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
1932    /// individually (using separate call to `Sync` after each), or calling
1933    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
1934    /// converted to a `BufferCollection` via
1935    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
1936    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
1937    /// the sync step and can create multiple tokens at once.
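    ///
    /// A client-side sketch (not generated code), assuming an existing
    /// synchronous proxy and the usual infinite-deadline constant:
    ///
    /// ```ignore
    /// fn flush(collection: &BufferCollectionSynchronousProxy) -> Result<(), fidl::Error> {
    ///     // Round-trip to the sysmem server so earlier one-way messages
    ///     // (e.g. token duplication) are known to have been received.
    ///     collection.r#sync(zx::MonotonicInstant::INFINITE)
    /// }
    /// ```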
1938    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
1939        let _response = self.client.send_query::<
1940            fidl::encoding::EmptyPayload,
1941            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
1942        >(
1943            (),
1944            0x11ac2555cf575b54,
1945            fidl::encoding::DynamicFlags::FLEXIBLE,
1946            ___deadline,
1947        )?
1948        .into_result::<BufferCollectionMarker>("sync")?;
1949        Ok(_response)
1950    }
1951
1952    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
1953    ///
1954    /// Normally a participant will convert a `BufferCollectionToken` into a
1955    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
1956    /// `Release` via the token (and then close the channel immediately or
1957    /// shortly later in response to server closing the server end), which
1958    /// avoids causing buffer collection failure. Without a prior `Release`,
1959    /// closing the `BufferCollectionToken` client end will cause buffer
1960    /// collection failure.
1961    ///
1962    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
1963    ///
1964    /// By default the server handles unexpected closure of a
1965    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
1966    /// first) by failing the buffer collection. Partly this is to expedite
1967    /// closing VMO handles to reclaim memory when any participant fails. If a
1968    /// participant would like to cleanly close a `BufferCollection` without
1969    /// causing buffer collection failure, the participant can send `Release`
1970    /// before closing the `BufferCollection` client end. The `Release` can
1971    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
1972    /// buffer collection won't require constraints from this node in order to
1973    /// allocate. If after `SetConstraints`, the constraints are retained and
1974    /// aggregated, despite the lack of `BufferCollection` connection at the
1975    /// time of constraints aggregation.
1976    ///
1977    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
1978    ///
1979    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
1980    /// end (without `Release` first) will trigger failure of the buffer
1981    /// collection. To close a `BufferCollectionTokenGroup` channel without
1982    /// failing the buffer collection, ensure that AllChildrenPresent() has been
1983    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
1984    /// client end.
1985    ///
1986    /// If `Release` occurs before
1987    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
1988    /// buffer collection will fail (triggered by reception of `Release` without
1989    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
1990    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
1991    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
1992    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
1993    /// close requires `AllChildrenPresent` (if not already sent), then
1994    /// `Release`, then close client end.
1995    ///
1996    /// If `Release` occurs after `AllChildrenPresent`, the children and all
1997    /// their constraints remain intact (just as they would if the
1998    /// `BufferCollectionTokenGroup` channel had remained open), and the client
1999    /// end close doesn't trigger buffer collection failure.
2000    ///
2001    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
2002    ///
2003    /// For brevity, the per-channel-protocol paragraphs above ignore the
2004    /// separate failure domain created by
2005    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
2006    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
2007    /// unexpectedly closes (without `Release` first) and that client end is
2008    /// under a failure domain, instead of failing the whole buffer collection,
2009    /// the failure domain is failed, but the buffer collection itself is
2010    /// isolated from failure of the failure domain. Such failure domains can be
2011    /// nested, in which case only the inner-most failure domain in which the
2012    /// `Node` resides fails.
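    ///
    /// A client-side sketch (not generated code) of cleanly closing a
    /// `BufferCollection` client end without failing the collection:
    ///
    /// ```ignore
    /// fn clean_close(collection: BufferCollectionSynchronousProxy) -> Result<(), fidl::Error> {
    ///     collection.r#release()?;
    ///     drop(collection); // closing the channel is now benign
    ///     Ok(())
    /// }
    /// ```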
2013    pub fn r#release(&self) -> Result<(), fidl::Error> {
2014        self.client.send::<fidl::encoding::EmptyPayload>(
2015            (),
2016            0x6a5cae7d6d6e04c6,
2017            fidl::encoding::DynamicFlags::FLEXIBLE,
2018        )
2019    }
2020
2021    /// Set a name for VMOs in this buffer collection.
2022    ///
2023    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the VMO itself
2024    /// will be truncated to fit. The name of the VMO will be suffixed with the
2025    /// buffer index within the collection (if the suffix fits within
2026    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
2027    /// listed in the inspect data.
2028    ///
2029    /// The name only affects VMOs allocated after the name is set; this call
2030    /// does not rename existing VMOs. If multiple clients set different names
2031    /// then the larger priority value will win. Setting a new name with the
2032    /// same priority as a prior name doesn't change the name.
2033    ///
2034    /// All table fields are currently required.
2035    ///
2036    /// + request `priority` The name is only set if this is the first `SetName`
2037    ///   or if `priority` is greater than any previous `priority` value in
2038    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
2039    /// + request `name` The name for VMOs created under this buffer collection.
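    ///
    /// A client-side sketch (not generated code); the `priority`/`name`
    /// table fields are assumed from the request documentation above:
    ///
    /// ```ignore
    /// fn name_buffers(collection: &BufferCollectionSynchronousProxy) -> Result<(), fidl::Error> {
    ///     collection.r#set_name(&NodeSetNameRequest {
    ///         priority: Some(100),
    ///         name: Some("example-buffers".to_string()),
    ///         ..Default::default()
    ///     })
    /// }
    /// ```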
2040    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
2041        self.client.send::<NodeSetNameRequest>(
2042            payload,
2043            0xb41f1624f48c1e9,
2044            fidl::encoding::DynamicFlags::FLEXIBLE,
2045        )
2046    }
2047
2048    /// Set information about the current client that can be used by sysmem to
2049    /// help diagnose leaking memory and allocation stalls waiting for a
2050    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
2051    ///
2052    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
2053    /// `Node`(s) derived from this `Node`, unless overridden by
2054    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
2055    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
2056    ///
2057    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
2058    /// `Allocator` is the most efficient way to ensure that all
2059    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
2060    /// set, and is also more efficient than separately sending the same debug
2061    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
2062    /// created [`fuchsia.sysmem2/Node`].
2063    ///
2064    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
2065    /// indicate which client is closing their channel first, leading to subtree
2066    /// failure (which can be normal if the purpose of the subtree is over, but
2067    /// if happening earlier than expected, the client-channel-specific name can
2068    /// help diagnose where the failure is first coming from, from sysmem's
2069    /// point of view).
2070    ///
2071    /// All table fields are currently required.
2072    ///
2073    /// + request `name` This can be an arbitrary string, but the current
2074    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
2075    /// + request `id` This can be an arbitrary id, but the current process ID
2076    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
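    ///
    /// A client-side sketch (not generated code); the `name`/`id` table
    /// fields are assumed from the request documentation above, and the
    /// literal values stand in for the process name and koid:
    ///
    /// ```ignore
    /// fn tag_node(collection: &BufferCollectionSynchronousProxy) -> Result<(), fidl::Error> {
    ///     collection.r#set_debug_client_info(&NodeSetDebugClientInfoRequest {
    ///         name: Some("my-component".to_string()),
    ///         id: Some(42),
    ///         ..Default::default()
    ///     })
    /// }
    /// ```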
2077    pub fn r#set_debug_client_info(
2078        &self,
2079        mut payload: &NodeSetDebugClientInfoRequest,
2080    ) -> Result<(), fidl::Error> {
2081        self.client.send::<NodeSetDebugClientInfoRequest>(
2082            payload,
2083            0x5cde8914608d99b1,
2084            fidl::encoding::DynamicFlags::FLEXIBLE,
2085        )
2086    }
2087
2088    /// Sysmem logs a warning if sysmem hasn't seen
2089    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
2090    /// within 5 seconds after creation of a new collection.
2091    ///
2092    /// Clients can call this method to change when the log is printed. If
2093    /// multiple clients set the deadline, it's unspecified which deadline will
2094    /// take effect.
2095    ///
2096    /// In most cases the default works well.
2097    ///
2098    /// All table fields are currently required.
2099    ///
2100    /// + request `deadline` The time at which sysmem will start trying to log
2101    ///   the warning, unless all constraints are with sysmem by then.
2102    pub fn r#set_debug_timeout_log_deadline(
2103        &self,
2104        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
2105    ) -> Result<(), fidl::Error> {
2106        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
2107            payload,
2108            0x716b0af13d5c0806,
2109            fidl::encoding::DynamicFlags::FLEXIBLE,
2110        )
2111    }
2112
2113    /// This enables verbose logging for the buffer collection.
2114    ///
2115    /// Verbose logging includes constraints set via
2116    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
2117    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
2118    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
2119    /// the tree of `Node`(s).
2120    ///
2121    /// Normally sysmem prints only a single line complaint when aggregation
2122    /// fails, with just the specific detailed reason that aggregation failed,
2123    /// with little surrounding context.  While this is often enough to diagnose
2124    /// a problem if only a small change was made and everything was working
2125    /// before the small change, it's often not particularly helpful for getting
2126    /// a new buffer collection to work for the first time.  Especially with
2127    /// more complex trees of nodes, involving things like
2128    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
2129    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
2130    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
2131    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
2132    /// looks like and why it's failing a logical allocation, or why a tree or
2133    /// subtree is failing sooner than expected.
2134    ///
2135    /// The intent of the extra logging is to be acceptable from a performance
2136    /// point of view, under the assumption that verbose logging is only enabled
2137    /// on a low number of buffer collections. If we're not tracking down a bug,
2138    /// we shouldn't send this message.
2139    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
2140        self.client.send::<fidl::encoding::EmptyPayload>(
2141            (),
2142            0x5209c77415b4dfad,
2143            fidl::encoding::DynamicFlags::FLEXIBLE,
2144        )
2145    }
2146
2147    /// This gets a handle that can be used as a parameter to
2148    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
2149    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
2150    /// client obtained this handle from this `Node`.
2151    ///
2152    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
2153    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
2154    /// despite the two calls typically being on different channels.
2155    ///
2156    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
2157    ///
2158    /// All table fields are currently required.
2159    ///
2160    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
2161    ///   different `Node` channel, to prove that the client obtained the handle
2162    ///   from this `Node`.
2163    pub fn r#get_node_ref(
2164        &self,
2165        ___deadline: zx::MonotonicInstant,
2166    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
2167        let _response = self.client.send_query::<
2168            fidl::encoding::EmptyPayload,
2169            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
2170        >(
2171            (),
2172            0x5b3d0e51614df053,
2173            fidl::encoding::DynamicFlags::FLEXIBLE,
2174            ___deadline,
2175        )?
2176        .into_result::<BufferCollectionMarker>("get_node_ref")?;
2177        Ok(_response)
2178    }
2179
2180    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
2181    /// rooted at a different child token of a common parent
2182    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
2183    /// passed-in `node_ref`.
2184    ///
2185    /// This call is for assisting with admission control de-duplication, and
2186    /// with debugging.
2187    ///
2188    /// The `node_ref` must be obtained using
2189    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
2190    ///
2191    /// The `node_ref` can be a duplicated handle; it's not necessary to call
2192    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
2193    ///
2194    /// If a calling token may not actually be a valid token at all due to a
2195    /// potentially hostile/untrusted provider of the token, call
2196    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
2197    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
2198    /// never responds due to a calling token not being a real token (not really
2199    /// talking to sysmem).  Another option is to call
2200    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
2201    /// which also validates the token along with converting it to a
2202    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
2203    ///
2204    /// All table fields are currently required.
2205    ///
2206    /// - response `is_alternate`
2207    ///   - true: The first parent node in common between the calling node and
2208    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
2209    ///     that the calling `Node` and the `node_ref` `Node` will not have both
2210    ///     their constraints apply - rather sysmem will choose one or the other
2211    ///     of the constraints - never both.  This is because only one child of
2212    ///     a `BufferCollectionTokenGroup` is selected during logical
2213    ///     allocation, with only that one child's subtree contributing to
2214    ///     constraints aggregation.
2215    ///   - false: The first parent node in common between the calling `Node`
2216    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
2217    ///     Currently, this means the first parent node in common is a
2218    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
2219    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
2220    ///     `Node` may have both their constraints apply during constraints
2221    ///     aggregation of the logical allocation, if both `Node`(s) are
2222    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
2223    ///     this case, there is no `BufferCollectionTokenGroup` that will
2224    ///     directly prevent the two `Node`(s) from both being selected and
2225    ///     their constraints both aggregated, but even when false, one or both
2226    ///     `Node`(s) may still be eliminated from consideration if one or both
2227    ///     `Node`(s) has a direct or indirect parent
2228    ///     `BufferCollectionTokenGroup` which selects a child subtree other
2229    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
2230    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
2231    ///   associated with the same buffer collection as the calling `Node`.
2232    ///   Another reason for this error is if the `node_ref` is an
2233    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
2234    ///   a real `node_ref` obtained from `GetNodeRef`.
2235    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
2236    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
2237    ///   the needed rights expected on a real `node_ref`.
2238    /// * No other failing status codes are returned by this call.  However,
2239    ///   sysmem may add additional codes in future, so the client should have
2240    ///   sensible default handling for any failing status code.
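    ///
    /// A client-side sketch (not generated code) combining `GetNodeRef` and
    /// `IsAlternateFor`; the `node_ref`/`is_alternate` field names are
    /// assumed from the documentation above, and `a`/`b` are two `Node`
    /// client ends of the same logical buffer collection:
    ///
    /// ```ignore
    /// fn check_alternate(
    ///     a: &BufferCollectionSynchronousProxy,
    ///     b: &BufferCollectionSynchronousProxy,
    /// ) -> Result<(), fidl::Error> {
    ///     let node_ref = a
    ///         .r#get_node_ref(zx::MonotonicInstant::INFINITE)?
    ///         .node_ref
    ///         .expect("node_ref is expected to be set");
    ///     let result = b.r#is_alternate_for(
    ///         NodeIsAlternateForRequest { node_ref: Some(node_ref), ..Default::default() },
    ///         zx::MonotonicInstant::INFINITE,
    ///     )?;
    ///     println!("is_alternate: {:?}", result.map(|r| r.is_alternate));
    ///     Ok(())
    /// }
    /// ```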
2241    pub fn r#is_alternate_for(
2242        &self,
2243        mut payload: NodeIsAlternateForRequest,
2244        ___deadline: zx::MonotonicInstant,
2245    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
2246        let _response = self.client.send_query::<
2247            NodeIsAlternateForRequest,
2248            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
2249        >(
2250            &mut payload,
2251            0x3a58e00157e0825,
2252            fidl::encoding::DynamicFlags::FLEXIBLE,
2253            ___deadline,
2254        )?
2255        .into_result::<BufferCollectionMarker>("is_alternate_for")?;
2256        Ok(_response.map(|x| x))
2257    }
2258
2259    /// Get the buffer collection ID. This ID is also available from
2260    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
2261    /// within the collection).
2262    ///
2263    /// This call is mainly useful in situations where we can't convey a
2264    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
2265    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
2266    /// handle, which can be joined back up with a `BufferCollection` client end
2267    /// that was created via a different path. Prefer to convey a
2268    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
2269    ///
2270    /// Trusting a `buffer_collection_id` value from a source other than sysmem
2271    /// is analogous to trusting a koid value from a source other than zircon.
2272    /// Both should be avoided unless really necessary, and both require
2273    /// caution. In some situations it may be reasonable to refer to a
2274    /// pre-established `BufferCollection` by `buffer_collection_id` via a
2275    /// protocol for efficiency reasons, but an incoming value purporting to be
2276    /// a `buffer_collection_id` is not sufficient alone to justify granting the
2277    /// sender of the `buffer_collection_id` any capability. The sender must
2278    /// first prove to a receiver that the sender has/had a VMO or has/had a
2279    /// `BufferCollectionToken` to the same collection by sending a handle that
2280    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
2281    /// `buffer_collection_id` value. The receiver should take care to avoid
2282    /// assuming that a sender had a `BufferCollectionToken` in cases where the
2283    /// sender has only proven that the sender had a VMO.
2284    ///
2285    /// - response `buffer_collection_id` This ID is unique per buffer
2286    ///   collection per boot. Each buffer is uniquely identified by the
2287    ///   `buffer_collection_id` and `buffer_index` together.
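    /// # Example (sketch)
    ///
    /// A minimal sketch of reading the collection ID via the synchronous
    /// proxy. `collection` is assumed to be an already-connected synchronous
    /// `BufferCollection` proxy, and `zx::MonotonicInstant::INFINITE` is used
    /// as the deadline only for brevity.
    ///
    /// ```ignore
    /// let response = collection.get_buffer_collection_id(zx::MonotonicInstant::INFINITE)?;
    /// // Fields of the generated response table are optional.
    /// if let Some(id) = response.buffer_collection_id {
    ///     println!("buffer_collection_id: {}", id);
    /// }
    /// ```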
2288    pub fn r#get_buffer_collection_id(
2289        &self,
2290        ___deadline: zx::MonotonicInstant,
2291    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
2292        let _response = self.client.send_query::<
2293            fidl::encoding::EmptyPayload,
2294            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
2295        >(
2296            (),
2297            0x77d19a494b78ba8c,
2298            fidl::encoding::DynamicFlags::FLEXIBLE,
2299            ___deadline,
2300        )?
2301        .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
2302        Ok(_response)
2303    }
2304
2305    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
2306    /// created after this message to weak, which means that a client's `Node`
2307    /// client end (or a child created after this message) is not alone
2308    /// sufficient to keep allocated VMOs alive.
2309    ///
2310    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
2311    /// `close_weak_asap`.
2312    ///
2313    /// This message is only permitted before the `Node` becomes ready for
2314    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
2315    ///   * `BufferCollectionToken`: any time
2316    ///   * `BufferCollection`: before `SetConstraints`
2317    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
2318    ///
2319    /// Currently, no conversion from strong `Node` to weak `Node` after ready
2320    /// for allocation is provided, but a client can simulate that by creating
2321    /// an additional `Node` before allocation and setting that additional
2322    /// `Node` to weak, and then potentially at some point later sending
2323    /// `Release` and closing the client end of the client's strong `Node`, but
2324    /// keeping the client's weak `Node`.
2325    ///
2326    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
2327    /// collection failure (all `Node` client end(s) will see
2328    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
2329    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
2330    /// this situation until all `Node`(s) are ready for allocation. For initial
2331    /// allocation to succeed, at least one strong `Node` is required to exist
2332    /// at allocation time, but after that client receives VMO handles, that
2333    /// client can `BufferCollection.Release` and close the client end without
2334    /// causing this type of failure.
2335    ///
2336    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
2337    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
2338    /// separately as appropriate.
2339    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
2340        self.client.send::<fidl::encoding::EmptyPayload>(
2341            (),
2342            0x22dd3ea514eeffe1,
2343            fidl::encoding::DynamicFlags::FLEXIBLE,
2344        )
2345    }
2346
2347    /// This indicates to sysmem that the client is prepared to pay attention to
2348    /// `close_weak_asap`.
2349    ///
2350    /// If sent, this message must be before
2351    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
2352    ///
2353    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
2354    /// send this message before `WaitForAllBuffersAllocated`, or a parent
2355    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
2356    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
2357    /// trigger buffer collection failure.
2358    ///
2359    /// This message is necessary because weak sysmem VMOs have not always been
2360    /// a thing, so older clients are not aware of the need to pay attention to
2361    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
2362    /// sysmem weak VMO handles asap. By having this message and requiring
2363    /// participants to indicate their acceptance of this aspect of the overall
2364    /// protocol, we avoid situations where an older client is delivered a weak
2365    /// VMO without any way for sysmem to get that VMO to close quickly later
2366    /// (and on a per-buffer basis).
2367    ///
2368    /// A participant that doesn't handle `close_weak_asap` and also doesn't
2369    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
2370    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
2371    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
2372    /// same participant has a child/delegate which does retrieve VMOs, that
2373    /// child/delegate will need to send `SetWeakOk` before
2374    /// `WaitForAllBuffersAllocated`.
2375    ///
2376    /// + request `for_child_nodes_also` If present and true, this means direct
2377    ///   child nodes of this node created after this message plus all
2378    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
2379    ///   those nodes. Any child node of this node that was created before this
2380    ///   message is not included. This setting is "sticky" in the sense that a
2381    ///   subsequent `SetWeakOk` without this bool set to true does not reset
2382    ///   the server-side bool. If this creates a problem for a participant, a
2383    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
2384    ///   tokens instead, as appropriate. A participant should only set
2385    ///   `for_child_nodes_also` true if the participant can really promise to
2386    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
2387    ///   weak VMO handles held by participants holding the corresponding child
2388    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
2389    ///   which are using sysmem(1) can be weak, despite the clients of those
2390    ///   sysmem(1) `Node`(s) not having any direct way to `SetWeakOk` or any
2391    ///   direct way to find out about `close_weak_asap`. This only applies to
2392    ///   descendants of this `Node` which are using sysmem(1), not to this
2393    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
2394    ///   token, which will fail allocation unless an ancestor of this `Node`
2395    ///   specified `for_child_nodes_also` true.
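    /// # Example (sketch)
    ///
    /// A minimal sketch of opting in to weak-VMO handling for this `Node` and
    /// for child `Node`(s) created afterwards. The `for_child_nodes_also`
    /// field name follows the request docs above; treating it as a `bool` is
    /// an assumption about the generated table.
    ///
    /// ```ignore
    /// collection.set_weak_ok(NodeSetWeakOkRequest {
    ///     // Only set this if close_weak_asap will really be obeyed for this
    ///     // node and for all child nodes created after this message.
    ///     for_child_nodes_also: Some(true),
    ///     ..Default::default()
    /// })?;
    /// ```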
2396    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
2397        self.client.send::<NodeSetWeakOkRequest>(
2398            &mut payload,
2399            0x38a44fc4d7724be9,
2400            fidl::encoding::DynamicFlags::FLEXIBLE,
2401        )
2402    }
2403
2404    /// The server_end will be closed after this `Node` and any child nodes have
2405    /// released their buffer counts, making those counts available for
2406    /// reservation by a different `Node` via
2407    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
2408    ///
2409    /// The `Node` buffer counts may not be released until the entire tree of
2410    /// `Node`(s) is closed or failed, because
2411    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
2412    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
2413    /// `Node` buffer counts remain reserved until the orphaned node is later
2414    /// cleaned up.
2415    ///
2416    /// If the `Node` exceeds a fairly large number of attached eventpair server
2417    /// ends, a log message will indicate this and the `Node` (and the
2418    /// appropriate sub-tree) will fail.
2419    ///
2420    /// The `server_end` will remain open when
2421    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
2422    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
2423    /// [`fuchsia.sysmem2/BufferCollection`].
2424    ///
2425    /// This message can also be used with a
2426    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
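    /// # Example (sketch)
    ///
    /// A minimal sketch of attaching node-lifetime tracking. The request
    /// field name `server_end` is an assumption based on the wording above,
    /// and `zx::EventPair::create()` returning a plain tuple reflects recent
    /// versions of the `zx` crate (older versions return a `Result`).
    ///
    /// ```ignore
    /// let (client_end, server_end) = zx::EventPair::create();
    /// collection.attach_node_tracking(NodeAttachNodeTrackingRequest {
    ///     server_end: Some(server_end),
    ///     ..Default::default()
    /// })?;
    /// // Keep client_end; ZX_EVENTPAIR_PEER_CLOSED on it indicates that this
    /// // Node's buffer counts have been released.
    /// ```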
2427    pub fn r#attach_node_tracking(
2428        &self,
2429        mut payload: NodeAttachNodeTrackingRequest,
2430    ) -> Result<(), fidl::Error> {
2431        self.client.send::<NodeAttachNodeTrackingRequest>(
2432            &mut payload,
2433            0x3f22f2a293d3cdac,
2434            fidl::encoding::DynamicFlags::FLEXIBLE,
2435        )
2436    }
2437
2438    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
2439    /// collection.
2440    ///
2441    /// A participant may only call
2442    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
2443    /// [`fuchsia.sysmem2/BufferCollection`].
2444    ///
2445    /// For buffer allocation to be attempted, all holders of a
2446    /// `BufferCollection` client end need to call `SetConstraints` before
2447    /// sysmem will attempt to allocate buffers.
2448    ///
2449    /// + request `constraints` These are the constraints on the buffer
2450    ///   collection imposed by the sending client/participant.  The
2451    ///   `constraints` field is not required to be set. If not set, the client
2452    ///   is not setting any actual constraints, but is indicating that the
2453    ///   client has no constraints to set. A client that doesn't set the
2454    ///   `constraints` field won't receive any VMO handles, but can still find
2455    ///   out how many buffers were allocated and can still refer to buffers by
2456    ///   their `buffer_index`.
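    /// # Example (sketch)
    ///
    /// A minimal sketch of sending constraints. Only
    /// `min_buffer_count_for_camping` is shown because it is referenced
    /// elsewhere in these docs; a real participant typically also sets usage
    /// and memory/image-format constraints matching how it will access the
    /// buffers.
    ///
    /// ```ignore
    /// collection.set_constraints(BufferCollectionSetConstraintsRequest {
    ///     constraints: Some(BufferCollectionConstraints {
    ///         // Reserve one buffer that this participant can hold onto
    ///         // ("camp on") at any given time.
    ///         min_buffer_count_for_camping: Some(1),
    ///         ..Default::default()
    ///     }),
    ///     ..Default::default()
    /// })?;
    /// ```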
2457    pub fn r#set_constraints(
2458        &self,
2459        mut payload: BufferCollectionSetConstraintsRequest,
2460    ) -> Result<(), fidl::Error> {
2461        self.client.send::<BufferCollectionSetConstraintsRequest>(
2462            &mut payload,
2463            0x1fde0f19d650197b,
2464            fidl::encoding::DynamicFlags::FLEXIBLE,
2465        )
2466    }
2467
2468    /// Wait until all buffers are allocated.
2469    ///
2470    /// This FIDL call completes when buffers have been allocated, or completes
2471    /// with some failure detail if allocation has been attempted but failed.
2472    ///
2473    /// The following must occur before buffers will be allocated:
2474    ///   * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
2475    ///     collection must be turned in via `BindSharedCollection` to get a
2476    ///     [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
2477    ///     [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
2478    ///     or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
2479    ///     to them.
2480    ///   * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
2481    ///     must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
2482    ///     sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
2483    ///     sent to them.
2484    ///
2485    /// - result `buffer_collection_info` The VMO handles and other related
2486    ///   info.
2487    /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
2488    ///   cannot be fulfilled due to resource exhaustion.
2489    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The request is
2490    ///   malformed.
2491    /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY]` The
2492    ///   request is valid but cannot be satisfied, perhaps due to hardware
2493    ///   limitations. This can happen if participants have incompatible
2494    ///   constraints (empty intersection, roughly speaking). See the log for
2495    ///   more info. In cases where a participant could potentially be treated
2496    ///   as optional, see [`BufferCollectionTokenGroup`]. When using
2497    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
2498    ///   error code if there aren't enough buffers in the pre-existing
2499    ///   collection to satisfy the constraints set on the attached token and
2500    ///   any sub-tree of tokens derived from the attached token.
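    /// # Example (sketch)
    ///
    /// A minimal sketch of blocking on allocation via the synchronous proxy.
    /// The `buffer_collection_info` field name follows the result docs above;
    /// the `Error` variant name assumes fidlgen's usual CamelCase mapping of
    /// `CONSTRAINTS_INTERSECTION_EMPTY`.
    ///
    /// ```ignore
    /// match collection.wait_for_all_buffers_allocated(zx::MonotonicInstant::INFINITE)? {
    ///     Ok(response) => {
    ///         let info = response.buffer_collection_info.expect("server sets info");
    ///         // ... use the VMOs and settings described by `info` ...
    ///     }
    ///     Err(Error::ConstraintsIntersectionEmpty) => {
    ///         // Participants' constraints could not all be satisfied.
    ///     }
    ///     Err(e) => eprintln!("allocation failed: {:?}", e),
    /// }
    /// ```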
2501    pub fn r#wait_for_all_buffers_allocated(
2502        &self,
2503        ___deadline: zx::MonotonicInstant,
2504    ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
2505        let _response = self
2506            .client
2507            .send_query::<fidl::encoding::EmptyPayload, fidl::encoding::FlexibleResultType<
2508                BufferCollectionWaitForAllBuffersAllocatedResponse,
2509                Error,
2510            >>(
2511                (), 0x62300344b61404e, fidl::encoding::DynamicFlags::FLEXIBLE, ___deadline
2512            )?
2513            .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
2514        Ok(_response.map(|x| x))
2515    }
2516
2517    /// Checks whether all the buffers have been allocated, in a polling
2518    /// fashion.
2519    ///
2520    /// * If the buffer collection has been allocated, returns success.
2521    /// * If the buffer collection failed allocation, returns the same
2522    ///   [`fuchsia.sysmem2/Error`] as
2523    ///   [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
2524    ///   return.
2525    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
2526    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
2527    ///   would not respond quickly.
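    /// # Example (sketch)
    ///
    /// A minimal polling sketch. `Error::Pending` assumes fidlgen's usual
    /// CamelCase mapping of the `PENDING` enum member.
    ///
    /// ```ignore
    /// match collection.check_all_buffers_allocated(zx::MonotonicInstant::INFINITE)? {
    ///     Ok(()) => println!("buffers are allocated"),
    ///     Err(Error::Pending) => println!("allocation not attempted yet"),
    ///     Err(e) => eprintln!("allocation failed: {:?}", e),
    /// }
    /// ```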
2528    pub fn r#check_all_buffers_allocated(
2529        &self,
2530        ___deadline: zx::MonotonicInstant,
2531    ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
2532        let _response = self.client.send_query::<
2533            fidl::encoding::EmptyPayload,
2534            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
2535        >(
2536            (),
2537            0x35a5fe77ce939c10,
2538            fidl::encoding::DynamicFlags::FLEXIBLE,
2539            ___deadline,
2540        )?
2541        .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
2542        Ok(_response.map(|x| x))
2543    }
2544
2545    /// Create a new token to add a new participant to an existing logical
2546    /// buffer collection, if the existing collection's buffer counts,
2547    /// constraints, and participants allow.
2548    ///
2549    /// This can be useful in replacing a failed participant, and/or in
2550    /// adding/re-adding a participant after buffers have already been
2551    /// allocated.
2552    ///
2553    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
2554    /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
2555    /// goes through the normal procedure of setting constraints or closing
2556    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
2557    /// clients' point of view, despite the possibility that all the buffers
2558    /// were actually allocated previously. This process is called "logical
2559    /// allocation". Most instances of "allocation" in docs for other messages
2560    /// can also be read as "allocation or logical allocation" while remaining
2561    /// valid, but we just say "allocation" in most places for brevity/clarity
2562    /// of explanation, with the details of "logical allocation" left for the
2563    /// docs here on `AttachToken`.
2564    ///
2565    /// Failure of an attached `Node` does not propagate to the parent of the
2566    /// attached `Node`. More generally, failure of a child `Node` is blocked
2567    /// from reaching its parent `Node` if the child is attached, or if the
2568    /// child is dispensable and the failure occurred after logical allocation
2569    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
2570    ///
2571    /// A participant may in some scenarios choose to initially use a
2572    /// dispensable token for a given instance of a delegate participant, and
2573    /// then later if the first instance of that delegate participant fails, a
2574    /// new second instance of that delegate participant may be given a token
2575    /// created with `AttachToken`.
2576    ///
2577    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
2578    /// client end, the token acts like any other token. The client can
2579    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
2580    /// and can send the token to a different process/participant. The
2581    /// `BufferCollectionToken` `Node` should be converted to a
2582    /// `BufferCollection` `Node` as normal by sending
2583    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
2584    /// without causing subtree failure by sending
2585    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
2586    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
2587    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
2588    /// the `BufferCollection`.
2589    ///
2590    /// Within the subtree, a success result from
2591    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
2592    /// the subtree participants' constraints were satisfiable using the
2593    /// already-existing buffer collection, the already-established
2594    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
2595    /// constraints, and the already-existing other participants (already added
2596    /// via successful logical allocation) and their specified buffer counts in
2597    /// their constraints. A failure result means the new participants'
2598    /// constraints cannot be satisfied using the existing buffer collection and
2599    /// its already-added participants. Creating a new collection instead may
2600    /// allow all participants' constraints to be satisfied, assuming
2601    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
2602    /// used.
2603    ///
2604    /// A token created with `AttachToken` performs constraints aggregation with
2605    /// all constraints currently in effect on the buffer collection, plus the
2606    /// attached token under consideration plus child tokens under the attached
2607    /// token which are not themselves an attached token or under such a token.
2608    /// Further subtrees under this subtree are considered for logical
2609    /// allocation only after this subtree has completed logical allocation.
2610    ///
2611    /// Assignment of existing buffers to participants'
2612    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
2613    /// etc is first-come first-served, but a child can't logically allocate
2614    /// before all its parents have sent `SetConstraints`.
2615    ///
2616    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
2617    /// in contrast to `AttachToken`, has the created token `Node` + child
2618    /// `Node`(s) (in the created subtree but not in any subtree under this
2619    /// subtree) participate in constraints aggregation along with its parent
2620    /// during the parent's allocation or logical allocation.
2621    ///
2622    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
2623    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
2624    /// sysmem before the new token can be passed to `BindSharedCollection`. The
2625    /// `Sync` of the new token can be accomplished with
2626    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
2627    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
2628    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
2629    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
2630    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
2631    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
2632    /// created token, to also sync those additional tokens to sysmem using a
2633    /// single round-trip.
2634    ///
2635    /// All table fields are currently required.
2636    ///
2637    /// + request `rights_attenuation_mask` This allows attenuating the VMO
2638    ///   rights of the subtree. These values for `rights_attenuation_mask`
2639    ///   result in no attenuation (note that 0 is not on this list):
2640    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
2641    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
2642    /// + request `token_request` The server end of the `BufferCollectionToken`
2643    ///   channel. The client retains the client end.
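    /// # Example (sketch)
    ///
    /// A minimal sketch of attaching a new token to an already-allocated
    /// collection. `fidl::endpoints::create_endpoints` returning a plain
    /// tuple reflects recent versions of the `fidl` crate (older versions
    /// return a `Result`), and `fidl::Rights::SAME_RIGHTS` is assumed to be
    /// the Rust spelling of `ZX_RIGHT_SAME_RIGHTS`.
    ///
    /// ```ignore
    /// let (token_client, token_server) =
    ///     fidl::endpoints::create_endpoints::<BufferCollectionTokenMarker>();
    /// collection.attach_token(BufferCollectionAttachTokenRequest {
    ///     rights_attenuation_mask: Some(fidl::Rights::SAME_RIGHTS),
    ///     token_request: Some(token_server),
    ///     ..Default::default()
    /// })?;
    /// // Sync via this collection (see Node.Sync) before handing
    /// // token_client to another participant.
    /// ```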
2644    pub fn r#attach_token(
2645        &self,
2646        mut payload: BufferCollectionAttachTokenRequest,
2647    ) -> Result<(), fidl::Error> {
2648        self.client.send::<BufferCollectionAttachTokenRequest>(
2649            &mut payload,
2650            0x46ac7d0008492982,
2651            fidl::encoding::DynamicFlags::FLEXIBLE,
2652        )
2653    }
2654
2655    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
2656    /// buffers have been allocated and only the specified number of buffers (or
2657    /// fewer) remain in the buffer collection.
2658    ///
2659    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
2660    /// client to wait until an old buffer collection is fully or mostly
2661    /// deallocated before attempting allocation of a new buffer collection. The
2662    /// eventpair is only signalled when the buffers of this collection have
2663    /// been fully deallocated (not just un-referenced by clients, but all the
2664    /// memory consumed by those buffers has been fully reclaimed/recycled), or
2665    /// when allocation or logical allocation fails for the tree or subtree
2666    /// including this [`fuchsia.sysmem2/BufferCollection`].
2667    ///
2668    /// The eventpair won't be signalled until allocation or logical allocation
2669    /// has completed; until then, the collection's current buffer count is
2670    /// ignored.
2671    ///
2672    /// If logical allocation fails for an attached subtree (using
2673    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
2674    /// eventpair will close during that failure regardless of the number of
2675    /// buffers potentially allocated in the overall buffer collection. This is
2676    /// for logical allocation consistency with normal allocation.
2677    ///
2678    /// The lifetime signalled by this event includes asynchronous cleanup of
2679    /// allocated buffers, and this asynchronous cleanup cannot occur until all
2680    /// holders of VMO handles to the buffers have closed those VMO handles.
2681    /// Therefore, clients should take care not to become blocked forever
2682    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
2683    /// participants using the logical buffer collection (including the waiter
2684    /// itself) are less trusted, less reliable, or potentially blocked by the
2685    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
2686    /// for the client wait may be prudent, depending on details of how the
2687    /// collection and/or its VMOs are used or shared. Failure to allocate a
2688    /// new/replacement buffer collection is better than getting stuck forever.
2689    ///
2690    /// The sysmem server itself intentionally does not perform any waiting on
2691    /// already-failed collections' VMOs to finish cleaning up before attempting
2692    /// a new allocation, and the sysmem server intentionally doesn't retry
2693    /// allocation if a new allocation fails due to out of memory, even if that
2694    /// failure is potentially due to continued existence of an old collection's
2695    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
2696    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
2697    /// as long as the waiting client is careful to not create a deadlock.
2698    ///
2699    /// Continued existence of old collections that are still cleaning up is not
2700    /// the only reason that a new allocation may fail due to insufficient
2701    /// memory, even if the new allocation is allocating physically contiguous
2702    /// buffers. Overall system memory pressure can also be the cause of failure
2703    /// to allocate a new collection. See also
2704    /// [`fuchsia.memorypressure/Provider`].
2705    ///
2706    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
2707    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
2708    /// `eventpair` handle (server end) can be sent via more than one
2709    /// `AttachLifetimeTracking` message to different protocols, and the
2710    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
2711    /// the conditions are met (all holders of duplicates have closed their
2712    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
2713    /// client end can (also) be duplicated without preventing the
2714    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
2715    ///
2716    /// The server intentionally doesn't "trust" any signals set on the
2717    /// `server_end`. This mechanism intentionally uses only
2718    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
2719    /// "early", and is only set when all handles to the server end eventpair
2720    /// are closed. No meaning is associated with any of the other signals, and
2721    /// clients should ignore any other signal bits on either end of the
2722    /// `eventpair`.
2723    ///
2724    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
2725    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
2726    /// transfer without causing `BufferCollection` channel failure).
2727    ///
2728    /// All table fields are currently required.
2729    ///
2730    /// + request `server_end` This eventpair handle will be closed by the
2731    ///   sysmem server when buffers have been allocated initially and the
2732    ///   number of buffers is then less than or equal to `buffers_remaining`.
2733    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
2734    ///   fewer) buffers to be fully deallocated. A number greater than zero can
2735    ///   be useful in situations where a known number of buffers are
2736    ///   intentionally not closed so that the data can continue to be used,
2737    ///   such as for keeping the last available video frame displayed in the UI
2738    ///   even if the video stream was using protected output buffers. It's
2739    ///   outside the scope of the `BufferCollection` interface (at least for
2740    ///   now) to determine how many buffers may be held without closing, but
2741    ///   it'll typically be in the range 0-2.
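    /// # Example (sketch)
    ///
    /// A minimal sketch of waiting for an old collection to drain before
    /// allocating a replacement. `zx::EventPair::create()` returning a plain
    /// tuple reflects recent versions of the `zx` crate (older versions
    /// return a `Result`).
    ///
    /// ```ignore
    /// let (client_end, server_end) = zx::EventPair::create();
    /// collection.attach_lifetime_tracking(BufferCollectionAttachLifetimeTrackingRequest {
    ///     server_end: Some(server_end),
    ///     // Signal only once every buffer has been fully deallocated.
    ///     buffers_remaining: Some(0),
    ///     ..Default::default()
    /// })?;
    /// // Later: wait (preferably asynchronously, and with a deadline) for
    /// // ZX_EVENTPAIR_PEER_CLOSED on client_end before allocating the
    /// // replacement collection.
    /// ```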
2742    pub fn r#attach_lifetime_tracking(
2743        &self,
2744        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
2745    ) -> Result<(), fidl::Error> {
2746        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
2747            &mut payload,
2748            0x3ecb510113116dcf,
2749            fidl::encoding::DynamicFlags::FLEXIBLE,
2750        )
2751    }
2752}
2753
2754#[derive(Debug, Clone)]
2755pub struct BufferCollectionProxy {
2756    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
2757}
2758
2759impl fidl::endpoints::Proxy for BufferCollectionProxy {
2760    type Protocol = BufferCollectionMarker;
2761
2762    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
2763        Self::new(inner)
2764    }
2765
2766    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
2767        self.client.into_channel().map_err(|client| Self { client })
2768    }
2769
2770    fn as_channel(&self) -> &::fidl::AsyncChannel {
2771        self.client.as_channel()
2772    }
2773}
2774
2775impl BufferCollectionProxy {
2776    /// Create a new Proxy for fuchsia.sysmem2/BufferCollection.
2777    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
2778        let protocol_name = <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
2779        Self { client: fidl::client::Client::new(channel, protocol_name) }
2780    }
2781
2782    /// Get a Stream of events from the remote end of the protocol.
2783    ///
2784    /// # Panics
2785    ///
2786    /// Panics if the event stream was already taken.
2787    pub fn take_event_stream(&self) -> BufferCollectionEventStream {
2788        BufferCollectionEventStream { event_receiver: self.client.take_event_receiver() }
2789    }
2790
2791    /// Ensure that previous messages have been received server side. This is
2792    /// particularly useful after previous messages that created new tokens,
2793    /// because a token must be known to the sysmem server before sending the
2794    /// token to another participant.
2795    ///
2796    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
2797    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
2798    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
2799    /// to mitigate the possibility of a hostile/fake
2800    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
2801    /// Another way is to pass the token to
2802    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
2803    /// the token as part of exchanging it for a
2804    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
2805    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
2806    /// of stalling.
2807    ///
2808    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
2809    /// and then starting and completing a `Sync`, it's then safe to send the
2810    /// `BufferCollectionToken` client ends to other participants knowing the
2811    /// server will recognize the tokens when they're sent by the other
2812    /// participants to sysmem in a
2813    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
2814    /// efficient way to create tokens while avoiding unnecessary round trips.
2815    ///
2816    /// Other options include waiting for each
2817    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
2818    /// individually (using separate call to `Sync` after each), or calling
2819    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
2820    /// converted to a `BufferCollection` via
2821    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
2822    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
2823    /// the sync step and can create multiple tokens at once.
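    /// # Example (sketch)
    ///
    /// A minimal sketch of using `Sync` from the asynchronous proxy after
    /// duplicating tokens, so the duplicates are known to sysmem before being
    /// handed to other participants. `collection` is assumed to be a
    /// connected `BufferCollectionProxy`.
    ///
    /// ```ignore
    /// // ... send one or more BufferCollectionToken.Duplicate messages ...
    /// collection.sync().await?;
    /// // The duplicated token client ends can now safely be sent to other
    /// // participants.
    /// ```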
2824    pub fn r#sync(
2825        &self,
2826    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
2827        BufferCollectionProxyInterface::r#sync(self)
2828    }
2829
2830    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
2831    ///
2832    /// Normally a participant will convert a `BufferCollectionToken` into a
2833    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
2834    /// `Release` via the token (and then close the channel immediately or
2835    /// shortly later in response to server closing the server end), which
2836    /// avoids causing buffer collection failure. Without a prior `Release`,
2837    /// closing the `BufferCollectionToken` client end will cause buffer
2838    /// collection failure.
2839    ///
2840    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
2841    ///
2842    /// By default the server handles unexpected closure of a
2843    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
2844    /// first) by failing the buffer collection. Partly this is to expedite
2845    /// closing VMO handles to reclaim memory when any participant fails. If a
2846    /// participant would like to cleanly close a `BufferCollection` without
2847    /// causing buffer collection failure, the participant can send `Release`
2848    /// before closing the `BufferCollection` client end. The `Release` can
2849    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
2850    /// buffer collection won't require constraints from this node in order to
2851    /// allocate. If after `SetConstraints`, the constraints are retained and
2852    /// aggregated, despite the lack of `BufferCollection` connection at the
2853    /// time of constraints aggregation.
2854    ///
2855    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
2856    ///
2857    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
2858    /// end (without `Release` first) will trigger failure of the buffer
2859    /// collection. To close a `BufferCollectionTokenGroup` channel without
2860    /// failing the buffer collection, ensure that AllChildrenPresent() has been
2861    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
2862    /// client end.
2863    ///
2864    /// If `Release` occurs before
2865    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
2866    /// buffer collection will fail (triggered by reception of `Release` without
2867    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
2868    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
2869    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
2870    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
2871    /// close requires `AllChildrenPresent` (if not already sent), then
2872    /// `Release`, then close client end.
2873    ///
2874    /// If `Release` occurs after `AllChildrenPresent`, the children and all
2875    /// their constraints remain intact (just as they would if the
2876    /// `BufferCollectionTokenGroup` channel had remained open), and the client
2877    /// end close doesn't trigger buffer collection failure.
2878    ///
2879    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
2880    ///
2881    /// For brevity, the per-channel-protocol paragraphs above ignore the
2882    /// separate failure domain created by
2883    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
2884    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
2885    /// unexpectedly closes (without `Release` first) and that client end is
2886    /// under a failure domain, instead of failing the whole buffer collection,
2887    /// the failure domain is failed, but the buffer collection itself is
2888    /// isolated from failure of the failure domain. Such failure domains can be
2889    /// nested, in which case only the inner-most failure domain in which the
2890    /// `Node` resides fails.
2891    pub fn r#release(&self) -> Result<(), fidl::Error> {
2892        BufferCollectionProxyInterface::r#release(self)
2893    }
2894
2895    /// Set a name for VMOs in this buffer collection.
2896    ///
2897    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the VMO itself
2898    /// will be truncated to fit. The name of the VMO will be suffixed with the
2899    /// buffer index within the collection (if the suffix fits within
2900    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
2901    /// listed in the inspect data.
2902    ///
2903    /// The name only affects VMOs allocated after the name is set; this call
2904    /// does not rename existing VMOs. If multiple clients set different names
2905    /// then the larger priority value will win. Setting a new name with the
2906    /// same priority as a prior name doesn't change the name.
2907    ///
2908    /// All table fields are currently required.
2909    ///
2910    /// + request `priority` The name is only set if this is the first `SetName`
2911    ///   or if `priority` is greater than any previous `priority` value in
2912    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
2913    /// + request `name` The name for VMOs created under this buffer collection.
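    /// # Example (sketch)
    ///
    /// A minimal sketch of naming the collection's VMOs. The `priority` and
    /// `name` field names follow the request docs above; their exact types
    /// are assumptions about the generated table.
    ///
    /// ```ignore
    /// collection.set_name(&NodeSetNameRequest {
    ///     priority: Some(100),
    ///     name: Some("my-component:video-frames".to_string()),
    ///     ..Default::default()
    /// })?;
    /// ```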
2914    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
2915        BufferCollectionProxyInterface::r#set_name(self, payload)
2916    }
2917
2918    /// Set information about the current client that can be used by sysmem to
2919    /// help diagnose leaking memory and allocation stalls waiting for a
2920    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
2921    ///
2922    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
2923    /// `Node`(s) derived from this `Node`, unless overridden by
2924    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
2925    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
2926    ///
2927    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
2928    /// `Allocator` is the most efficient way to ensure that all
2929    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
2930    /// set, and is also more efficient than separately sending the same debug
2931    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
2932    /// created [`fuchsia.sysmem2/Node`].
2933    ///
2934    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
2935    /// indicate which client is closing their channel first, leading to subtree
2936    /// failure (which can be normal if the purpose of the subtree is over, but
2937    /// if happening earlier than expected, the client-channel-specific name can
2938    /// help diagnose where the failure is first coming from, from sysmem's
2939    /// point of view).
2940    ///
2941    /// All table fields are currently required.
2942    ///
2943    /// + request `name` This can be an arbitrary string, but the current
2944    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
2945    /// + request `id` This can be an arbitrary id, but the current process ID
2946    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
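    /// # Example (sketch)
    ///
    /// A minimal sketch of attaching debug info to this `Node`. The `name`
    /// and `id` field names follow the request docs above; their exact types
    /// are assumptions, and `process_koid` stands in for however the caller
    /// obtains its process koid.
    ///
    /// ```ignore
    /// collection.set_debug_client_info(&NodeSetDebugClientInfoRequest {
    ///     name: Some("my-component".to_string()),
    ///     id: Some(process_koid),
    ///     ..Default::default()
    /// })?;
    /// ```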
2947    pub fn r#set_debug_client_info(
2948        &self,
2949        mut payload: &NodeSetDebugClientInfoRequest,
2950    ) -> Result<(), fidl::Error> {
2951        BufferCollectionProxyInterface::r#set_debug_client_info(self, payload)
2952    }
2953
2954    /// Sysmem logs a warning if sysmem hasn't seen
2955    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
2956    /// within 5 seconds after creation of a new collection.
2957    ///
2958    /// Clients can call this method to change when the log is printed. If
2959    /// multiple clients set the deadline, it's unspecified which deadline will
2960    /// take effect.
2961    ///
2962    /// In most cases the default works well.
2963    ///
2964    /// All table fields are currently required.
2965    ///
2966    /// + request `deadline` The time at which sysmem will start trying to log
2967    ///   the warning, unless all constraints are with sysmem by then.
2968    pub fn r#set_debug_timeout_log_deadline(
2969        &self,
2970        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
2971    ) -> Result<(), fidl::Error> {
2972        BufferCollectionProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
2973    }
2974
2975    /// This enables verbose logging for the buffer collection.
2976    ///
2977    /// Verbose logging includes constraints set via
2978    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
2979    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
2980    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
2981    /// the tree of `Node`(s).
2982    ///
2983    /// Normally sysmem prints only a single line complaint when aggregation
2984    /// fails, with just the specific detailed reason that aggregation failed,
2985    /// with little surrounding context.  While this is often enough to diagnose
2986    /// a problem if only a small change was made and everything was working
2987    /// before the small change, it's often not particularly helpful for getting
2988    /// a new buffer collection to work for the first time.  Especially with
2989    /// more complex trees of nodes, involving things like
2990    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
2991    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
2992    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
2993    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
2994    /// looks like and why it's failing a logical allocation, or why a tree or
2995    /// subtree is failing sooner than expected.
2996    ///
2997    /// The intent of the extra logging is to be acceptable from a performance
2998    /// point of view, under the assumption that verbose logging is only enabled
2999    /// on a low number of buffer collections. If we're not tracking down a bug,
3000    /// we shouldn't send this message.
3001    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
3002        BufferCollectionProxyInterface::r#set_verbose_logging(self)
3003    }
3004
3005    /// This gets a handle that can be used as a parameter to
3006    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
3007    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
3008    /// client obtained this handle from this `Node`.
3009    ///
3010    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
3011    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
3012    /// despite the two calls typically being on different channels.
3013    ///
3014    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
3015    ///
3016    /// All table fields are currently required.
3017    ///
3018    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
3019    ///   different `Node` channel, to prove that the client obtained the handle
3020    ///   from this `Node`.
3021    pub fn r#get_node_ref(
3022        &self,
3023    ) -> fidl::client::QueryResponseFut<
3024        NodeGetNodeRefResponse,
3025        fidl::encoding::DefaultFuchsiaResourceDialect,
3026    > {
3027        BufferCollectionProxyInterface::r#get_node_ref(self)
3028    }
3029
3030    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
3031    /// rooted at a different child token of a common parent
3032    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
3033    /// passed-in `node_ref`.
3034    ///
3035    /// This call is for assisting with admission control de-duplication, and
3036    /// with debugging.
3037    ///
3038    /// The `node_ref` must be obtained using
3039    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
3040    ///
3041    /// The `node_ref` can be a duplicated handle; it's not necessary to call
3042    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
3043    ///
3044    /// If a calling token may not actually be a valid token at all due to a
3045    /// potentially hostile/untrusted provider of the token, call
3046    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
3047    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
3048    /// never responds due to a calling token not being a real token (not really
3049    /// talking to sysmem).  Another option is to call
3050    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
3051    /// which also validates the token along with converting it to a
3052    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
3053    ///
3054    /// All table fields are currently required.
3055    ///
3056    /// - response `is_alternate`
3057    ///   - true: The first parent node in common between the calling node and
3058    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
3059    ///     that the calling `Node` and the `node_ref` `Node` will not have both
3060    ///     their constraints apply - rather sysmem will choose one or the other
3061    ///     of the constraints - never both.  This is because only one child of
3062    ///     a `BufferCollectionTokenGroup` is selected during logical
3063    ///     allocation, with only that one child's subtree contributing to
3064    ///     constraints aggregation.
3065    ///   - false: The first parent node in common between the calling `Node`
3066    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
3067    ///     Currently, this means the first parent node in common is a
3068    ///     `BufferCollectionToken` or `BufferCollection` (regardless of whether
3069    ///     `Release` has been sent).  This means that the calling `Node` and the `node_ref`
3070    ///     `Node` may have both their constraints apply during constraints
3071    ///     aggregation of the logical allocation, if both `Node`(s) are
3072    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
3073    ///     this case, there is no `BufferCollectionTokenGroup` that will
3074    ///     directly prevent the two `Node`(s) from both being selected and
3075    ///     their constraints both aggregated, but even when false, one or both
3076    ///     `Node`(s) may still be eliminated from consideration if one or both
3077    ///     `Node`(s) has a direct or indirect parent
3078    ///     `BufferCollectionTokenGroup` which selects a child subtree other
3079    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
3080    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
3081    ///   associated with the same buffer collection as the calling `Node`.
3082    ///   Another reason for this error is if the `node_ref` is an
3083    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
3084    ///   a real `node_ref` obtained from `GetNodeRef`.
3085    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
3086    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
3087    ///   the needed rights expected on a real `node_ref`.
3088    /// * No other failing status codes are returned by this call.  However,
3089    ///   sysmem may add additional codes in future, so the client should have
3090    ///   sensible default handling for any failing status code.
3091    pub fn r#is_alternate_for(
3092        &self,
3093        mut payload: NodeIsAlternateForRequest,
3094    ) -> fidl::client::QueryResponseFut<
3095        NodeIsAlternateForResult,
3096        fidl::encoding::DefaultFuchsiaResourceDialect,
3097    > {
3098        BufferCollectionProxyInterface::r#is_alternate_for(self, payload)
3099    }
3100
3101    /// Get the buffer collection ID. This ID is also available from
3102    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
3103    /// within the collection).
3104    ///
3105    /// This call is mainly useful in situations where we can't convey a
3106    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
3107    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
3108    /// handle, which can be joined back up with a `BufferCollection` client end
3109    /// that was created via a different path. Prefer to convey a
3110    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
3111    ///
3112    /// Trusting a `buffer_collection_id` value from a source other than sysmem
3113    /// is analogous to trusting a koid value from a source other than zircon.
3114    /// Both should be avoided unless really necessary, and both require
3115    /// caution. In some situations it may be reasonable to refer to a
3116    /// pre-established `BufferCollection` by `buffer_collection_id` via a
3117    /// protocol for efficiency reasons, but an incoming value purporting to be
3118    /// a `buffer_collection_id` is not sufficient alone to justify granting the
3119    /// sender of the `buffer_collection_id` any capability. The sender must
3120    /// first prove to a receiver that the sender has/had a VMO or has/had a
3121    /// `BufferCollectionToken` to the same collection by sending a handle that
3122    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
3123    /// `buffer_collection_id` value. The receiver should take care to avoid
3124    /// assuming that a sender had a `BufferCollectionToken` in cases where the
3125    /// sender has only proven that the sender had a VMO.
3126    ///
3127    /// - response `buffer_collection_id` This ID is unique per buffer
3128    ///   collection per boot. Each buffer is uniquely identified by the
3129    ///   `buffer_collection_id` and `buffer_index` together.
3130    pub fn r#get_buffer_collection_id(
3131        &self,
3132    ) -> fidl::client::QueryResponseFut<
3133        NodeGetBufferCollectionIdResponse,
3134        fidl::encoding::DefaultFuchsiaResourceDialect,
3135    > {
3136        BufferCollectionProxyInterface::r#get_buffer_collection_id(self)
3137    }
3138
3139    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
3140    /// created after this message to weak, which means that a client's `Node`
3141    /// client end (or a child created after this message) is not alone
3142    /// sufficient to keep allocated VMOs alive.
3143    ///
3144    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
3145    /// `close_weak_asap`.
3146    ///
3147    /// This message is only permitted before the `Node` becomes ready for
3148    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
3149    ///   * `BufferCollectionToken`: any time
3150    ///   * `BufferCollection`: before `SetConstraints`
3151    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
3152    ///
3153    /// Currently, no conversion from strong `Node` to weak `Node` after ready
3154    /// for allocation is provided, but a client can simulate that by creating
3155    /// an additional `Node` before allocation and setting that additional
3156    /// `Node` to weak, and then potentially at some point later sending
3157    /// `Release` and closing the client end of the client's strong `Node`, but
3158    /// keeping the client's weak `Node`.
3159    ///
3160    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
3161    /// collection failure (all `Node` client end(s) will see
3162    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
3163    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
3164    /// this situation until all `Node`(s) are ready for allocation. For initial
3165    /// allocation to succeed, at least one strong `Node` is required to exist
3166    /// at allocation time, but after that client receives VMO handles, that
3167    /// client can `BufferCollection.Release` and close the client end without
3168    /// causing this type of failure.
3169    ///
3170    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
3171    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
3172    /// separately as appropriate.
3173    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
3174        BufferCollectionProxyInterface::r#set_weak(self)
3175    }
3176
3177    /// This indicates to sysmem that the client is prepared to pay attention to
3178    /// `close_weak_asap`.
3179    ///
3180    /// If sent, this message must be before
3181    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
3182    ///
3183    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
3184    /// send this message before `WaitForAllBuffersAllocated`, or a parent
3185    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
3186    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
3187    /// trigger buffer collection failure.
3188    ///
3189    /// This message is necessary because weak sysmem VMOs have not always been
3190    /// a thing, so older clients are not aware of the need to pay attention to
3191    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
3192    /// sysmem weak VMO handles asap. By having this message and requiring
3193    /// participants to indicate their acceptance of this aspect of the overall
3194    /// protocol, we avoid situations where an older client is delivered a weak
3195    /// VMO without any way for sysmem to get that VMO to close quickly later
3196    /// (and on a per-buffer basis).
3197    ///
3198    /// A participant that doesn't handle `close_weak_asap` and also doesn't
3199    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
3200    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
3201    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
3202    /// same participant has a child/delegate which does retrieve VMOs, that
3203    /// child/delegate will need to send `SetWeakOk` before
3204    /// `WaitForAllBuffersAllocated`.
3205    ///
3206    /// + request `for_child_nodes_also` If present and true, this means direct
3207    ///   child nodes of this node created after this message plus all
3208    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
3209    ///   those nodes. Any child node of this node that was created before this
3210    ///   message is not included. This setting is "sticky" in the sense that a
3211    ///   subsequent `SetWeakOk` without this bool set to true does not reset
3212    ///   the server-side bool. If this creates a problem for a participant, a
3213    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
3214    ///   tokens instead, as appropriate. A participant should only set
3215    ///   `for_child_nodes_also` true if the participant can really promise to
3216    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
3217    ///   weak VMO handles held by participants holding the corresponding child
3218    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
3219    ///   which are using sysmem(1) can be weak, despite the clients of those
3220    ///   sysmem(1) `Node`(s) not having any direct way to `SetWeakOk` or any
3221    ///   direct way to find out about `close_weak_asap`. This only applies to
3222    ///   descendants of this `Node` which are using sysmem(1), not to this
3223    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
3224    ///   token, which will fail allocation unless an ancestor of this `Node`
3225    ///   specified `for_child_nodes_also` true.
3226    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
3227        BufferCollectionProxyInterface::r#set_weak_ok(self, payload)
3228    }
3229
3230    /// The server_end will be closed after this `Node` and any child nodes have
3231    /// released their buffer counts, making those counts available for
3232    /// reservation by a different `Node` via
3233    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
3234    ///
3235    /// The `Node` buffer counts may not be released until the entire tree of
3236    /// `Node`(s) is closed or failed, because
3237    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
3238    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
3239    /// `Node` buffer counts remain reserved until the orphaned node is later
3240    /// cleaned up.
3241    ///
3242    /// If the `Node` exceeds a fairly large number of attached eventpair server
3243    /// ends, a log message will indicate this and the `Node` (and the
3244    /// appropriate sub-tree) will fail.
3245    ///
3246    /// The `server_end` will remain open when
3247    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
3248    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
3249    /// [`fuchsia.sysmem2/BufferCollection`].
3250    ///
3251    /// This message can also be used with a
3252    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
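    ///
    /// An illustrative usage sketch (editorial addition, not fidlgen output): it
    /// assumes a connected `BufferCollectionProxy` named `collection` and that
    /// [`NodeAttachNodeTrackingRequest`] carries the eventpair in a `server_end`
    /// field.
    ///
    /// ```ignore
    /// // Keep `client_end`; its ZX_EVENTPAIR_PEER_CLOSED signal indicates that
    /// // this `Node`'s buffer counts have been released.
    /// let (client_end, server_end) = zx::EventPair::create();
    /// collection.attach_node_tracking(NodeAttachNodeTrackingRequest {
    ///     server_end: Some(server_end),
    ///     ..Default::default()
    /// })?;
    /// ```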
3253    pub fn r#attach_node_tracking(
3254        &self,
3255        mut payload: NodeAttachNodeTrackingRequest,
3256    ) -> Result<(), fidl::Error> {
3257        BufferCollectionProxyInterface::r#attach_node_tracking(self, payload)
3258    }
3259
3260    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
3261    /// collection.
3262    ///
3263    /// A participant may only call
3264    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
3265    /// [`fuchsia.sysmem2/BufferCollection`].
3266    ///
3267    /// Sysmem will not attempt buffer allocation until all holders of a
3268    /// `BufferCollection` client end have sent `SetConstraints` via their
3269    /// respective client ends.
3270    ///
3271    /// + request `constraints` These are the constraints on the buffer
3272    ///   collection imposed by the sending client/participant.  The
3273    ///   `constraints` field is not required to be set. If not set, the client
3274    ///   is not setting any actual constraints, but is indicating that the
3275    ///   client has no constraints to set. A client that doesn't set the
3276    ///   `constraints` field won't receive any VMO handles, but can still find
3277    ///   out how many buffers were allocated and can still refer to buffers by
3278    ///   their `buffer_index`.
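    ///
    /// An illustrative usage sketch (editorial addition, not fidlgen output): it
    /// assumes a connected `BufferCollectionProxy` named `collection`; the single
    /// constraint shown is a placeholder, not a recommendation.
    ///
    /// ```ignore
    /// collection.set_constraints(BufferCollectionSetConstraintsRequest {
    ///     constraints: Some(BufferCollectionConstraints {
    ///         min_buffer_count_for_camping: Some(1),
    ///         ..Default::default()
    ///     }),
    ///     ..Default::default()
    /// })?;
    /// ```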
3279    pub fn r#set_constraints(
3280        &self,
3281        mut payload: BufferCollectionSetConstraintsRequest,
3282    ) -> Result<(), fidl::Error> {
3283        BufferCollectionProxyInterface::r#set_constraints(self, payload)
3284    }
3285
3286    /// Wait until all buffers are allocated.
3287    ///
3288    /// This FIDL call completes when buffers have been allocated, or completes
3289    /// with some failure detail if allocation has been attempted but failed.
3290    ///
3291    /// The following must occur before buffers will be allocated:
3292    ///   * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
3293    ///     collection must be turned in via `BindSharedCollection` to get a
3294    ///     [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
3295    ///     [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
3296    ///     or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
3297    ///     to them.
3298    ///   * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
3299    ///     must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
3300    ///     sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
3301    ///     sent to them.
3302    ///
3303    /// - result `buffer_collection_info` The VMO handles and other related
3304    ///   info.
3305    /// * error [`fuchsia.sysmem2/Error.NO_MEMORY`] The request is valid but
3306    ///   cannot be fulfilled due to resource exhaustion.
3307    /// * error [`fuchsia.sysmem2/Error.PROTOCOL_DEVIATION`] The request is
3308    ///   malformed.
3309    /// * error [`fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY`] The
3310    ///   request is valid but cannot be satisfied, perhaps due to hardware
3311    ///   limitations. This can happen if participants have incompatible
3312    ///   constraints (empty intersection, roughly speaking). See the log for
3313    ///   more info. In cases where a participant could potentially be treated
3314    ///   as optional, see [`BufferCollectionTokenGroup`]. When using
3315    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
3316    ///   error code if there aren't enough buffers in the pre-existing
3317    ///   collection to satisfy the constraints set on the attached token and
3318    ///   any sub-tree of tokens derived from the attached token.
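    ///
    /// An illustrative usage sketch (editorial addition, not fidlgen output): it
    /// assumes a connected `BufferCollectionProxy` named `collection` in an async
    /// context, with `SetConstraints` already sent.
    ///
    /// ```ignore
    /// // The outer `?` surfaces transport-level fidl::Error; the inner Result is
    /// // the sysmem-level success/Error described above.
    /// let result = collection.wait_for_all_buffers_allocated().await?;
    /// let response = result.expect("buffer allocation failed");
    /// let _info = response.buffer_collection_info; // VMO handles and related info
    /// ```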
3319    pub fn r#wait_for_all_buffers_allocated(
3320        &self,
3321    ) -> fidl::client::QueryResponseFut<
3322        BufferCollectionWaitForAllBuffersAllocatedResult,
3323        fidl::encoding::DefaultFuchsiaResourceDialect,
3324    > {
3325        BufferCollectionProxyInterface::r#wait_for_all_buffers_allocated(self)
3326    }
3327
3328    /// Checks whether all the buffers have been allocated, in a polling
3329    /// fashion.
3330    ///
3331    /// * If the buffer collection has been allocated, returns success.
3332    /// * If the buffer collection failed allocation, returns the same
3333    ///   [`fuchsia.sysmem2/Error`] as
3334    ///   [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] would
3335    ///   return.
3336    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
3337    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
3338    ///   would not respond quickly.
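    ///
    /// An illustrative polling sketch (editorial addition, not fidlgen output): it
    /// assumes a connected `BufferCollectionProxy` named `collection` and that the
    /// generated [`Error`] enum exposes the `PENDING` member as `Error::Pending`.
    ///
    /// ```ignore
    /// match collection.check_all_buffers_allocated().await? {
    ///     Ok(()) => { /* allocated; WaitForAllBuffersAllocated responds quickly */ }
    ///     Err(Error::Pending) => { /* allocation not attempted yet */ }
    ///     Err(e) => { /* same failure WaitForAllBuffersAllocated would report */ }
    /// }
    /// ```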
3339    pub fn r#check_all_buffers_allocated(
3340        &self,
3341    ) -> fidl::client::QueryResponseFut<
3342        BufferCollectionCheckAllBuffersAllocatedResult,
3343        fidl::encoding::DefaultFuchsiaResourceDialect,
3344    > {
3345        BufferCollectionProxyInterface::r#check_all_buffers_allocated(self)
3346    }
3347
3348    /// Create a new token to add a new participant to an existing logical
3349    /// buffer collection, if the existing collection's buffer counts,
3350    /// constraints, and participants allow.
3351    ///
3352    /// This can be useful in replacing a failed participant, and/or in
3353    /// adding/re-adding a participant after buffers have already been
3354    /// allocated.
3355    ///
3356    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the
3357    /// subtree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
3358    /// goes through the normal procedure of setting constraints or closing
3359    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
3360    /// clients' point of view, despite the possibility that all the buffers
3361    /// were actually allocated previously. This process is called "logical
3362    /// allocation". Most instances of "allocation" in docs for other messages
3363    /// can also be read as "allocation or logical allocation" while remaining
3364    /// valid, but we just say "allocation" in most places for brevity/clarity
3365    /// of explanation, with the details of "logical allocation" left for the
3366    /// docs here on `AttachToken`.
3367    ///
3368    /// Failure of an attached `Node` does not propagate to the parent of the
3369    /// attached `Node`. More generally, failure of a child `Node` is blocked
3370    /// from reaching its parent `Node` if the child is attached, or if the
3371    /// child is dispensable and the failure occurred after logical allocation
3372    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
3373    ///
3374    /// A participant may in some scenarios choose to initially use a
3375    /// dispensable token for a given instance of a delegate participant, and
3376    /// then later if the first instance of that delegate participant fails, a
3377    /// new second instance of that delegate participant my be given a token
3378    /// created with `AttachToken`.
3379    ///
3380    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
3381    /// client end, the token acts like any other token. The client can
3382    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
3383    /// and can send the token to a different process/participant. The
3384    /// `BufferCollectionToken` `Node` should be converted to a
3385    /// `BufferCollection` `Node` as normal by sending
3386    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
3387    /// without causing subtree failure by sending
3388    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
3389    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
3390    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
3391    /// the `BufferCollection`.
3392    ///
3393    /// Within the subtree, a success result from
3394    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
3395    /// the subtree participants' constraints were satisfiable using the
3396    /// already-existing buffer collection, the already-established
3397    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
3398    /// constraints, and the already-existing other participants (already added
3399    /// via successful logical allocation) and their specified buffer counts in
3400    /// their constraints. A failure result means the new participants'
3401    /// constraints cannot be satisfied using the existing buffer collection and
3402    /// its already-added participants. Creating a new collection instead may
3403    /// allow all participants' constraints to be satisfied, assuming
3404    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
3405    /// used.
3406    ///
3407    /// A token created with `AttachToken` performs constraints aggregation with
3408    /// all constraints currently in effect on the buffer collection, plus the
3409    /// attached token under consideration plus child tokens under the attached
3410    /// token which are not themselves an attached token or under such a token.
3411    /// Further subtrees under this subtree are considered for logical
3412    /// allocation only after this subtree has completed logical allocation.
3413    ///
3414    /// Assignment of existing buffers to participants'
3415    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
3416    /// etc. is first-come, first-served, but a child can't logically allocate
3417    /// before all its parents have sent `SetConstraints`.
3418    ///
3419    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
3420    /// in contrast to `AttachToken`, has the created token `Node` + child
3421    /// `Node`(s) (in the created subtree but not in any subtree under this
3422    /// subtree) participate in constraints aggregation along with its parent
3423    /// during the parent's allocation or logical allocation.
3424    ///
3425    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
3426    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
3427    /// sysmem before the new token can be passed to `BindSharedCollection`. The
3428    /// `Sync` of the new token can be accomplished with
3429    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
3430    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
3431    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
3432    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
3433    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
3434    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
3435    /// created token, to also sync those additional tokens to sysmem using a
3436    /// single round-trip.
3437    ///
3438    /// All table fields are currently required.
3439    ///
3440    /// + request `rights_attenuation_mask` This allows attenuating the VMO
3441    ///   rights of the subtree. These values for `rights_attenuation_mask`
3442    ///   result in no attenuation (note that 0 is not on this list):
3443    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
3444    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
3445    /// + request `token_request` The server end of the `BufferCollectionToken`
3446    ///   channel. The client retains the client end.
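    ///
    /// An illustrative usage sketch (editorial addition, not fidlgen output): it
    /// assumes a connected `BufferCollectionProxy` named `collection` and that
    /// `fidl::endpoints::create_endpoints` and `fidl::Rights::SAME_RIGHTS` are
    /// available in this form.
    ///
    /// ```ignore
    /// // The client retains `token_client`; the server end is handed to sysmem.
    /// let (token_client, token_server) =
    ///     fidl::endpoints::create_endpoints::<BufferCollectionTokenMarker>();
    /// collection.attach_token(BufferCollectionAttachTokenRequest {
    ///     rights_attenuation_mask: Some(fidl::Rights::SAME_RIGHTS),
    ///     token_request: Some(token_server),
    ///     ..Default::default()
    /// })?;
    /// // Sync (for example via `collection.sync().await?`) before sending
    /// // `token_client` to another participant.
    /// ```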
3447    pub fn r#attach_token(
3448        &self,
3449        mut payload: BufferCollectionAttachTokenRequest,
3450    ) -> Result<(), fidl::Error> {
3451        BufferCollectionProxyInterface::r#attach_token(self, payload)
3452    }
3453
3454    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
3455    /// buffers have been allocated and only the specified number of buffers (or
3456    /// fewer) remain in the buffer collection.
3457    ///
3458    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
3459    /// client to wait until an old buffer collection is fully or mostly
3460    /// deallocated before attempting allocation of a new buffer collection. The
3461    /// eventpair is only signalled when the buffers of this collection have
3462    /// been fully deallocated (not just un-referenced by clients, but all the
3463    /// memory consumed by those buffers has been fully reclaimed/recycled), or
3464    /// when allocation or logical allocation fails for the tree or subtree
3465    /// including this [`fuchsia.sysmem2/BufferCollection`].
3466    ///
3467    /// The eventpair won't be signalled until allocation or logical allocation
3468    /// has completed; until then, the collection's current buffer count is
3469    /// ignored.
3470    ///
3471    /// If logical allocation fails for an attached subtree (using
3472    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
3473    /// eventpair will close during that failure regardless of the number of
3474    /// buffers potentially allocated in the overall buffer collection. This is
3475    /// for logical allocation consistency with normal allocation.
3476    ///
3477    /// The lifetime signalled by this event includes asynchronous cleanup of
3478    /// allocated buffers, and this asynchronous cleanup cannot occur until all
3479    /// holders of VMO handles to the buffers have closed those VMO handles.
3480    /// Therefore, clients should take care not to become blocked forever
3481    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
3482    /// participants using the logical buffer collection (including the waiter
3483    /// itself) are less trusted, less reliable, or potentially blocked by the
3484    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
3485    /// for the client wait may be prudent, depending on details of how the
3486    /// collection and/or its VMOs are used or shared. Failure to allocate a
3487    /// new/replacement buffer collection is better than getting stuck forever.
3488    ///
3489    /// The sysmem server itself intentionally does not perform any waiting on
3490    /// already-failed collections' VMOs to finish cleaning up before attempting
3491    /// a new allocation, and the sysmem server intentionally doesn't retry
3492    /// allocation if a new allocation fails due to out of memory, even if that
3493    /// failure is potentially due to continued existence of an old collection's
3494    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
3495    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
3496    /// as long as the waiting client is careful to not create a deadlock.
3497    ///
3498    /// Continued existence of old collections that are still cleaning up is not
3499    /// the only reason that a new allocation may fail due to insufficient
3500    /// memory, even if the new allocation is allocating physically contiguous
3501    /// buffers. Overall system memory pressure can also be the cause of failure
3502    /// to allocate a new collection. See also
3503    /// [`fuchsia.memorypressure/Provider`].
3504    ///
3505    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
3506    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
3507    /// `eventpair` handle (server end) can be sent via more than one
3508    /// `AttachLifetimeTracking` message to different protocols, and the
3509    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
3510    /// the conditions are met (all holders of duplicates have closed their
3511    /// server end handle(s)). Also, thanks to how eventpair endponts work, the
3512    /// client end can (also) be duplicated without preventing the
3513    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
3514    ///
3515    /// The server intentionally doesn't "trust" any signals set on the
3516    /// `server_end`. This mechanism intentionally uses only
3517    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
3518    /// "early", and is only set when all handles to the server end eventpair
3519    /// are closed. No meaning is associated with any of the other signals, and
3520    /// clients should ignore any other signal bits on either end of the
3521    /// `eventpair`.
3522    ///
3523    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
3524    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
3525    /// transfer without causing `BufferCollection` channel failure).
3526    ///
3527    /// All table fields are currently required.
3528    ///
3529    /// + request `server_end` This eventpair handle will be closed by the
3530    ///   sysmem server when buffers have been allocated initially and the
3531    ///   number of buffers is then less than or equal to `buffers_remaining`.
3532    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
3533    ///   fewer) buffers to be fully deallocated. A number greater than zero can
3534    ///   be useful in situations where a known number of buffers are
3535    ///   intentionally not closed so that the data can continue to be used,
3536    ///   such as for keeping the last available video frame displayed in the UI
3537    ///   even if the video stream was using protected output buffers. It's
3538    ///   outside the scope of the `BufferCollection` interface (at least for
3539    ///   now) to determine how many buffers may be held without closing, but
3540    ///   it'll typically be in the range 0-2.
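    ///
    /// An illustrative usage sketch (editorial addition, not fidlgen output): it
    /// assumes a connected `BufferCollectionProxy` named `collection` and the
    /// `zx::EventPair::create()` form shown.
    ///
    /// ```ignore
    /// // Keep `client_end` and wait on it (asynchronously) for
    /// // ZX_EVENTPAIR_PEER_CLOSED once the old collection should be winding down.
    /// let (client_end, server_end) = zx::EventPair::create();
    /// collection.attach_lifetime_tracking(BufferCollectionAttachLifetimeTrackingRequest {
    ///     server_end: Some(server_end),
    ///     buffers_remaining: Some(0),
    ///     ..Default::default()
    /// })?;
    /// ```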
3541    pub fn r#attach_lifetime_tracking(
3542        &self,
3543        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
3544    ) -> Result<(), fidl::Error> {
3545        BufferCollectionProxyInterface::r#attach_lifetime_tracking(self, payload)
3546    }
3547}
3548
3549impl BufferCollectionProxyInterface for BufferCollectionProxy {
3550    type SyncResponseFut =
3551        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
3552    fn r#sync(&self) -> Self::SyncResponseFut {
3553        fn _decode(
3554            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
3555        ) -> Result<(), fidl::Error> {
3556            let _response = fidl::client::decode_transaction_body::<
3557                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
3558                fidl::encoding::DefaultFuchsiaResourceDialect,
3559                0x11ac2555cf575b54,
3560            >(_buf?)?
3561            .into_result::<BufferCollectionMarker>("sync")?;
3562            Ok(_response)
3563        }
3564        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
3565            (),
3566            0x11ac2555cf575b54,
3567            fidl::encoding::DynamicFlags::FLEXIBLE,
3568            _decode,
3569        )
3570    }
3571
3572    fn r#release(&self) -> Result<(), fidl::Error> {
3573        self.client.send::<fidl::encoding::EmptyPayload>(
3574            (),
3575            0x6a5cae7d6d6e04c6,
3576            fidl::encoding::DynamicFlags::FLEXIBLE,
3577        )
3578    }
3579
3580    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
3581        self.client.send::<NodeSetNameRequest>(
3582            payload,
3583            0xb41f1624f48c1e9,
3584            fidl::encoding::DynamicFlags::FLEXIBLE,
3585        )
3586    }
3587
3588    fn r#set_debug_client_info(
3589        &self,
3590        mut payload: &NodeSetDebugClientInfoRequest,
3591    ) -> Result<(), fidl::Error> {
3592        self.client.send::<NodeSetDebugClientInfoRequest>(
3593            payload,
3594            0x5cde8914608d99b1,
3595            fidl::encoding::DynamicFlags::FLEXIBLE,
3596        )
3597    }
3598
3599    fn r#set_debug_timeout_log_deadline(
3600        &self,
3601        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
3602    ) -> Result<(), fidl::Error> {
3603        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
3604            payload,
3605            0x716b0af13d5c0806,
3606            fidl::encoding::DynamicFlags::FLEXIBLE,
3607        )
3608    }
3609
3610    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
3611        self.client.send::<fidl::encoding::EmptyPayload>(
3612            (),
3613            0x5209c77415b4dfad,
3614            fidl::encoding::DynamicFlags::FLEXIBLE,
3615        )
3616    }
3617
3618    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
3619        NodeGetNodeRefResponse,
3620        fidl::encoding::DefaultFuchsiaResourceDialect,
3621    >;
3622    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
3623        fn _decode(
3624            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
3625        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
3626            let _response = fidl::client::decode_transaction_body::<
3627                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
3628                fidl::encoding::DefaultFuchsiaResourceDialect,
3629                0x5b3d0e51614df053,
3630            >(_buf?)?
3631            .into_result::<BufferCollectionMarker>("get_node_ref")?;
3632            Ok(_response)
3633        }
3634        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
3635            (),
3636            0x5b3d0e51614df053,
3637            fidl::encoding::DynamicFlags::FLEXIBLE,
3638            _decode,
3639        )
3640    }
3641
3642    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
3643        NodeIsAlternateForResult,
3644        fidl::encoding::DefaultFuchsiaResourceDialect,
3645    >;
3646    fn r#is_alternate_for(
3647        &self,
3648        mut payload: NodeIsAlternateForRequest,
3649    ) -> Self::IsAlternateForResponseFut {
3650        fn _decode(
3651            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
3652        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
3653            let _response = fidl::client::decode_transaction_body::<
3654                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
3655                fidl::encoding::DefaultFuchsiaResourceDialect,
3656                0x3a58e00157e0825,
3657            >(_buf?)?
3658            .into_result::<BufferCollectionMarker>("is_alternate_for")?;
3659            Ok(_response.map(|x| x))
3660        }
3661        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
3662            &mut payload,
3663            0x3a58e00157e0825,
3664            fidl::encoding::DynamicFlags::FLEXIBLE,
3665            _decode,
3666        )
3667    }
3668
3669    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
3670        NodeGetBufferCollectionIdResponse,
3671        fidl::encoding::DefaultFuchsiaResourceDialect,
3672    >;
3673    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
3674        fn _decode(
3675            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
3676        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
3677            let _response = fidl::client::decode_transaction_body::<
3678                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
3679                fidl::encoding::DefaultFuchsiaResourceDialect,
3680                0x77d19a494b78ba8c,
3681            >(_buf?)?
3682            .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
3683            Ok(_response)
3684        }
3685        self.client.send_query_and_decode::<
3686            fidl::encoding::EmptyPayload,
3687            NodeGetBufferCollectionIdResponse,
3688        >(
3689            (),
3690            0x77d19a494b78ba8c,
3691            fidl::encoding::DynamicFlags::FLEXIBLE,
3692            _decode,
3693        )
3694    }
3695
3696    fn r#set_weak(&self) -> Result<(), fidl::Error> {
3697        self.client.send::<fidl::encoding::EmptyPayload>(
3698            (),
3699            0x22dd3ea514eeffe1,
3700            fidl::encoding::DynamicFlags::FLEXIBLE,
3701        )
3702    }
3703
3704    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
3705        self.client.send::<NodeSetWeakOkRequest>(
3706            &mut payload,
3707            0x38a44fc4d7724be9,
3708            fidl::encoding::DynamicFlags::FLEXIBLE,
3709        )
3710    }
3711
3712    fn r#attach_node_tracking(
3713        &self,
3714        mut payload: NodeAttachNodeTrackingRequest,
3715    ) -> Result<(), fidl::Error> {
3716        self.client.send::<NodeAttachNodeTrackingRequest>(
3717            &mut payload,
3718            0x3f22f2a293d3cdac,
3719            fidl::encoding::DynamicFlags::FLEXIBLE,
3720        )
3721    }
3722
3723    fn r#set_constraints(
3724        &self,
3725        mut payload: BufferCollectionSetConstraintsRequest,
3726    ) -> Result<(), fidl::Error> {
3727        self.client.send::<BufferCollectionSetConstraintsRequest>(
3728            &mut payload,
3729            0x1fde0f19d650197b,
3730            fidl::encoding::DynamicFlags::FLEXIBLE,
3731        )
3732    }
3733
3734    type WaitForAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
3735        BufferCollectionWaitForAllBuffersAllocatedResult,
3736        fidl::encoding::DefaultFuchsiaResourceDialect,
3737    >;
3738    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut {
3739        fn _decode(
3740            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
3741        ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
3742            let _response = fidl::client::decode_transaction_body::<
3743                fidl::encoding::FlexibleResultType<
3744                    BufferCollectionWaitForAllBuffersAllocatedResponse,
3745                    Error,
3746                >,
3747                fidl::encoding::DefaultFuchsiaResourceDialect,
3748                0x62300344b61404e,
3749            >(_buf?)?
3750            .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
3751            Ok(_response.map(|x| x))
3752        }
3753        self.client.send_query_and_decode::<
3754            fidl::encoding::EmptyPayload,
3755            BufferCollectionWaitForAllBuffersAllocatedResult,
3756        >(
3757            (),
3758            0x62300344b61404e,
3759            fidl::encoding::DynamicFlags::FLEXIBLE,
3760            _decode,
3761        )
3762    }
3763
3764    type CheckAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
3765        BufferCollectionCheckAllBuffersAllocatedResult,
3766        fidl::encoding::DefaultFuchsiaResourceDialect,
3767    >;
3768    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut {
3769        fn _decode(
3770            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
3771        ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
3772            let _response = fidl::client::decode_transaction_body::<
3773                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
3774                fidl::encoding::DefaultFuchsiaResourceDialect,
3775                0x35a5fe77ce939c10,
3776            >(_buf?)?
3777            .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
3778            Ok(_response.map(|x| x))
3779        }
3780        self.client.send_query_and_decode::<
3781            fidl::encoding::EmptyPayload,
3782            BufferCollectionCheckAllBuffersAllocatedResult,
3783        >(
3784            (),
3785            0x35a5fe77ce939c10,
3786            fidl::encoding::DynamicFlags::FLEXIBLE,
3787            _decode,
3788        )
3789    }
3790
3791    fn r#attach_token(
3792        &self,
3793        mut payload: BufferCollectionAttachTokenRequest,
3794    ) -> Result<(), fidl::Error> {
3795        self.client.send::<BufferCollectionAttachTokenRequest>(
3796            &mut payload,
3797            0x46ac7d0008492982,
3798            fidl::encoding::DynamicFlags::FLEXIBLE,
3799        )
3800    }
3801
3802    fn r#attach_lifetime_tracking(
3803        &self,
3804        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
3805    ) -> Result<(), fidl::Error> {
3806        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
3807            &mut payload,
3808            0x3ecb510113116dcf,
3809            fidl::encoding::DynamicFlags::FLEXIBLE,
3810        )
3811    }
3812}
3813
3814pub struct BufferCollectionEventStream {
3815    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
3816}
3817
3818impl std::marker::Unpin for BufferCollectionEventStream {}
3819
3820impl futures::stream::FusedStream for BufferCollectionEventStream {
3821    fn is_terminated(&self) -> bool {
3822        self.event_receiver.is_terminated()
3823    }
3824}
3825
3826impl futures::Stream for BufferCollectionEventStream {
3827    type Item = Result<BufferCollectionEvent, fidl::Error>;
3828
3829    fn poll_next(
3830        mut self: std::pin::Pin<&mut Self>,
3831        cx: &mut std::task::Context<'_>,
3832    ) -> std::task::Poll<Option<Self::Item>> {
3833        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
3834            &mut self.event_receiver,
3835            cx
3836        )?) {
3837            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionEvent::decode(buf))),
3838            None => std::task::Poll::Ready(None),
3839        }
3840    }
3841}
3842
3843#[derive(Debug)]
3844pub enum BufferCollectionEvent {
3845    #[non_exhaustive]
3846    _UnknownEvent {
3847        /// Ordinal of the event that was sent.
3848        ordinal: u64,
3849    },
3850}
3851
3852impl BufferCollectionEvent {
3853    /// Decodes a message buffer as a [`BufferCollectionEvent`].
3854    fn decode(
3855        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
3856    ) -> Result<BufferCollectionEvent, fidl::Error> {
3857        let (bytes, _handles) = buf.split_mut();
3858        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
3859        debug_assert_eq!(tx_header.tx_id, 0);
3860        match tx_header.ordinal {
3861            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
3862                Ok(BufferCollectionEvent::_UnknownEvent { ordinal: tx_header.ordinal })
3863            }
3864            _ => Err(fidl::Error::UnknownOrdinal {
3865                ordinal: tx_header.ordinal,
3866                protocol_name:
3867                    <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
3868            }),
3869        }
3870    }
3871}
3872
3873/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollection.
3874pub struct BufferCollectionRequestStream {
3875    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
3876    is_terminated: bool,
3877}
3878
3879impl std::marker::Unpin for BufferCollectionRequestStream {}
3880
3881impl futures::stream::FusedStream for BufferCollectionRequestStream {
3882    fn is_terminated(&self) -> bool {
3883        self.is_terminated
3884    }
3885}
3886
3887impl fidl::endpoints::RequestStream for BufferCollectionRequestStream {
3888    type Protocol = BufferCollectionMarker;
3889    type ControlHandle = BufferCollectionControlHandle;
3890
3891    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
3892        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
3893    }
3894
3895    fn control_handle(&self) -> Self::ControlHandle {
3896        BufferCollectionControlHandle { inner: self.inner.clone() }
3897    }
3898
3899    fn into_inner(
3900        self,
3901    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
3902    {
3903        (self.inner, self.is_terminated)
3904    }
3905
3906    fn from_inner(
3907        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
3908        is_terminated: bool,
3909    ) -> Self {
3910        Self { inner, is_terminated }
3911    }
3912}
3913
3914impl futures::Stream for BufferCollectionRequestStream {
3915    type Item = Result<BufferCollectionRequest, fidl::Error>;
3916
3917    fn poll_next(
3918        mut self: std::pin::Pin<&mut Self>,
3919        cx: &mut std::task::Context<'_>,
3920    ) -> std::task::Poll<Option<Self::Item>> {
3921        let this = &mut *self;
3922        if this.inner.check_shutdown(cx) {
3923            this.is_terminated = true;
3924            return std::task::Poll::Ready(None);
3925        }
3926        if this.is_terminated {
3927            panic!("polled BufferCollectionRequestStream after completion");
3928        }
3929        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
3930            |bytes, handles| {
3931                match this.inner.channel().read_etc(cx, bytes, handles) {
3932                    std::task::Poll::Ready(Ok(())) => {}
3933                    std::task::Poll::Pending => return std::task::Poll::Pending,
3934                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
3935                        this.is_terminated = true;
3936                        return std::task::Poll::Ready(None);
3937                    }
3938                    std::task::Poll::Ready(Err(e)) => {
3939                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
3940                            e.into(),
3941                        ))))
3942                    }
3943                }
3944
3945                // A message has been received from the channel
3946                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
3947
3948                std::task::Poll::Ready(Some(match header.ordinal {
3949                    0x11ac2555cf575b54 => {
3950                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
3951                        let mut req = fidl::new_empty!(
3952                            fidl::encoding::EmptyPayload,
3953                            fidl::encoding::DefaultFuchsiaResourceDialect
3954                        );
3955                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
3956                        let control_handle =
3957                            BufferCollectionControlHandle { inner: this.inner.clone() };
3958                        Ok(BufferCollectionRequest::Sync {
3959                            responder: BufferCollectionSyncResponder {
3960                                control_handle: std::mem::ManuallyDrop::new(control_handle),
3961                                tx_id: header.tx_id,
3962                            },
3963                        })
3964                    }
3965                    0x6a5cae7d6d6e04c6 => {
3966                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
3967                        let mut req = fidl::new_empty!(
3968                            fidl::encoding::EmptyPayload,
3969                            fidl::encoding::DefaultFuchsiaResourceDialect
3970                        );
3971                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
3972                        let control_handle =
3973                            BufferCollectionControlHandle { inner: this.inner.clone() };
3974                        Ok(BufferCollectionRequest::Release { control_handle })
3975                    }
3976                    0xb41f1624f48c1e9 => {
3977                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
3978                        let mut req = fidl::new_empty!(
3979                            NodeSetNameRequest,
3980                            fidl::encoding::DefaultFuchsiaResourceDialect
3981                        );
3982                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
3983                        let control_handle =
3984                            BufferCollectionControlHandle { inner: this.inner.clone() };
3985                        Ok(BufferCollectionRequest::SetName { payload: req, control_handle })
3986                    }
3987                    0x5cde8914608d99b1 => {
3988                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
3989                        let mut req = fidl::new_empty!(
3990                            NodeSetDebugClientInfoRequest,
3991                            fidl::encoding::DefaultFuchsiaResourceDialect
3992                        );
3993                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
3994                        let control_handle =
3995                            BufferCollectionControlHandle { inner: this.inner.clone() };
3996                        Ok(BufferCollectionRequest::SetDebugClientInfo {
3997                            payload: req,
3998                            control_handle,
3999                        })
4000                    }
4001                    0x716b0af13d5c0806 => {
4002                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4003                        let mut req = fidl::new_empty!(
4004                            NodeSetDebugTimeoutLogDeadlineRequest,
4005                            fidl::encoding::DefaultFuchsiaResourceDialect
4006                        );
4007                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
4008                        let control_handle =
4009                            BufferCollectionControlHandle { inner: this.inner.clone() };
4010                        Ok(BufferCollectionRequest::SetDebugTimeoutLogDeadline {
4011                            payload: req,
4012                            control_handle,
4013                        })
4014                    }
4015                    0x5209c77415b4dfad => {
4016                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4017                        let mut req = fidl::new_empty!(
4018                            fidl::encoding::EmptyPayload,
4019                            fidl::encoding::DefaultFuchsiaResourceDialect
4020                        );
4021                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4022                        let control_handle =
4023                            BufferCollectionControlHandle { inner: this.inner.clone() };
4024                        Ok(BufferCollectionRequest::SetVerboseLogging { control_handle })
4025                    }
4026                    0x5b3d0e51614df053 => {
4027                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4028                        let mut req = fidl::new_empty!(
4029                            fidl::encoding::EmptyPayload,
4030                            fidl::encoding::DefaultFuchsiaResourceDialect
4031                        );
4032                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4033                        let control_handle =
4034                            BufferCollectionControlHandle { inner: this.inner.clone() };
4035                        Ok(BufferCollectionRequest::GetNodeRef {
4036                            responder: BufferCollectionGetNodeRefResponder {
4037                                control_handle: std::mem::ManuallyDrop::new(control_handle),
4038                                tx_id: header.tx_id,
4039                            },
4040                        })
4041                    }
4042                    0x3a58e00157e0825 => {
4043                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4044                        let mut req = fidl::new_empty!(
4045                            NodeIsAlternateForRequest,
4046                            fidl::encoding::DefaultFuchsiaResourceDialect
4047                        );
4048                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
4049                        let control_handle =
4050                            BufferCollectionControlHandle { inner: this.inner.clone() };
4051                        Ok(BufferCollectionRequest::IsAlternateFor {
4052                            payload: req,
4053                            responder: BufferCollectionIsAlternateForResponder {
4054                                control_handle: std::mem::ManuallyDrop::new(control_handle),
4055                                tx_id: header.tx_id,
4056                            },
4057                        })
4058                    }
4059                    0x77d19a494b78ba8c => {
4060                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4061                        let mut req = fidl::new_empty!(
4062                            fidl::encoding::EmptyPayload,
4063                            fidl::encoding::DefaultFuchsiaResourceDialect
4064                        );
4065                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4066                        let control_handle =
4067                            BufferCollectionControlHandle { inner: this.inner.clone() };
4068                        Ok(BufferCollectionRequest::GetBufferCollectionId {
4069                            responder: BufferCollectionGetBufferCollectionIdResponder {
4070                                control_handle: std::mem::ManuallyDrop::new(control_handle),
4071                                tx_id: header.tx_id,
4072                            },
4073                        })
4074                    }
4075                    0x22dd3ea514eeffe1 => {
4076                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4077                        let mut req = fidl::new_empty!(
4078                            fidl::encoding::EmptyPayload,
4079                            fidl::encoding::DefaultFuchsiaResourceDialect
4080                        );
4081                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4082                        let control_handle =
4083                            BufferCollectionControlHandle { inner: this.inner.clone() };
4084                        Ok(BufferCollectionRequest::SetWeak { control_handle })
4085                    }
4086                    0x38a44fc4d7724be9 => {
4087                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4088                        let mut req = fidl::new_empty!(
4089                            NodeSetWeakOkRequest,
4090                            fidl::encoding::DefaultFuchsiaResourceDialect
4091                        );
4092                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
4093                        let control_handle =
4094                            BufferCollectionControlHandle { inner: this.inner.clone() };
4095                        Ok(BufferCollectionRequest::SetWeakOk { payload: req, control_handle })
4096                    }
4097                    0x3f22f2a293d3cdac => {
4098                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4099                        let mut req = fidl::new_empty!(
4100                            NodeAttachNodeTrackingRequest,
4101                            fidl::encoding::DefaultFuchsiaResourceDialect
4102                        );
4103                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
4104                        let control_handle =
4105                            BufferCollectionControlHandle { inner: this.inner.clone() };
4106                        Ok(BufferCollectionRequest::AttachNodeTracking {
4107                            payload: req,
4108                            control_handle,
4109                        })
4110                    }
4111                    0x1fde0f19d650197b => {
4112                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4113                        let mut req = fidl::new_empty!(
4114                            BufferCollectionSetConstraintsRequest,
4115                            fidl::encoding::DefaultFuchsiaResourceDialect
4116                        );
4117                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionSetConstraintsRequest>(&header, _body_bytes, handles, &mut req)?;
4118                        let control_handle =
4119                            BufferCollectionControlHandle { inner: this.inner.clone() };
4120                        Ok(BufferCollectionRequest::SetConstraints { payload: req, control_handle })
4121                    }
4122                    0x62300344b61404e => {
4123                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4124                        let mut req = fidl::new_empty!(
4125                            fidl::encoding::EmptyPayload,
4126                            fidl::encoding::DefaultFuchsiaResourceDialect
4127                        );
4128                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4129                        let control_handle =
4130                            BufferCollectionControlHandle { inner: this.inner.clone() };
4131                        Ok(BufferCollectionRequest::WaitForAllBuffersAllocated {
4132                            responder: BufferCollectionWaitForAllBuffersAllocatedResponder {
4133                                control_handle: std::mem::ManuallyDrop::new(control_handle),
4134                                tx_id: header.tx_id,
4135                            },
4136                        })
4137                    }
4138                    0x35a5fe77ce939c10 => {
4139                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4140                        let mut req = fidl::new_empty!(
4141                            fidl::encoding::EmptyPayload,
4142                            fidl::encoding::DefaultFuchsiaResourceDialect
4143                        );
4144                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4145                        let control_handle =
4146                            BufferCollectionControlHandle { inner: this.inner.clone() };
4147                        Ok(BufferCollectionRequest::CheckAllBuffersAllocated {
4148                            responder: BufferCollectionCheckAllBuffersAllocatedResponder {
4149                                control_handle: std::mem::ManuallyDrop::new(control_handle),
4150                                tx_id: header.tx_id,
4151                            },
4152                        })
4153                    }
4154                    0x46ac7d0008492982 => {
4155                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4156                        let mut req = fidl::new_empty!(
4157                            BufferCollectionAttachTokenRequest,
4158                            fidl::encoding::DefaultFuchsiaResourceDialect
4159                        );
4160                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachTokenRequest>(&header, _body_bytes, handles, &mut req)?;
4161                        let control_handle =
4162                            BufferCollectionControlHandle { inner: this.inner.clone() };
4163                        Ok(BufferCollectionRequest::AttachToken { payload: req, control_handle })
4164                    }
4165                    0x3ecb510113116dcf => {
4166                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4167                        let mut req = fidl::new_empty!(
4168                            BufferCollectionAttachLifetimeTrackingRequest,
4169                            fidl::encoding::DefaultFuchsiaResourceDialect
4170                        );
4171                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachLifetimeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
4172                        let control_handle =
4173                            BufferCollectionControlHandle { inner: this.inner.clone() };
4174                        Ok(BufferCollectionRequest::AttachLifetimeTracking {
4175                            payload: req,
4176                            control_handle,
4177                        })
4178                    }
4179                    _ if header.tx_id == 0
4180                        && header
4181                            .dynamic_flags()
4182                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
4183                    {
4184                        Ok(BufferCollectionRequest::_UnknownMethod {
4185                            ordinal: header.ordinal,
4186                            control_handle: BufferCollectionControlHandle {
4187                                inner: this.inner.clone(),
4188                            },
4189                            method_type: fidl::MethodType::OneWay,
4190                        })
4191                    }
4192                    _ if header
4193                        .dynamic_flags()
4194                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
4195                    {
4196                        this.inner.send_framework_err(
4197                            fidl::encoding::FrameworkErr::UnknownMethod,
4198                            header.tx_id,
4199                            header.ordinal,
4200                            header.dynamic_flags(),
4201                            (bytes, handles),
4202                        )?;
4203                        Ok(BufferCollectionRequest::_UnknownMethod {
4204                            ordinal: header.ordinal,
4205                            control_handle: BufferCollectionControlHandle {
4206                                inner: this.inner.clone(),
4207                            },
4208                            method_type: fidl::MethodType::TwoWay,
4209                        })
4210                    }
4211                    _ => Err(fidl::Error::UnknownOrdinal {
4212                        ordinal: header.ordinal,
4213                        protocol_name:
4214                            <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
4215                    }),
4216                }))
4217            },
4218        )
4219    }
4220}
4221
4222/// [`fuchsia.sysmem2/BufferCollection`] is a connection directly from a
4223    /// participant to sysmem regarding a buffer collection; often the buffer collection
4224/// is shared with other participants which have their own `BufferCollection`
4225/// client end(s) associated with the same buffer collection.  In other words,
4226/// an instance of the `BufferCollection` interface is a view of a buffer
4227/// collection, not the buffer collection itself.
4228///
4229/// The `BufferCollection` connection exists to facilitate async indication of
4230/// when the buffer collection has been populated with buffers.
4231///
4232/// Also, the channel's closure by the sysmem server is an indication to the
4233/// client that the client should close all VMO handles that were obtained from
4234/// the `BufferCollection` ASAP.
4235///
4236/// Some buffer collections can use enough memory that it can be worth avoiding
4237/// allocation overlap (in time) using
4238/// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] so that the
4239/// initiator can tell when enough buffers of the buffer collection have been
4240/// fully deallocated prior to the initiator allocating a new buffer collection.
4241///
4242/// Epitaphs are not used in this protocol.
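    ///
    /// An illustrative server-side sketch (editorial addition, not fidlgen output)
    /// of draining a [`BufferCollectionRequestStream`] in a test or fake sysmem,
    /// assuming `stream` is already bound to a server end.
    ///
    /// ```ignore
    /// use futures::TryStreamExt;
    /// while let Some(request) = stream.try_next().await? {
    ///     match request {
    ///         BufferCollectionRequest::Release { control_handle: _ } => {
    ///             // Clean close requested by the client.
    ///         }
    ///         _ => { /* handle or ignore other messages */ }
    ///     }
    /// }
    /// ```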
4243#[derive(Debug)]
4244pub enum BufferCollectionRequest {
4245    /// Ensure that previous messages have been received server side. This is
4246    /// particularly useful after previous messages that created new tokens,
4247    /// because a token must be known to the sysmem server before sending the
4248    /// token to another participant.
4249    ///
4250    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
4251    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
4252    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
4253    /// to mitigate the possibility of a hostile/fake
4254    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
4255    /// Another way is to pass the token to
4256    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
4257    /// the token as part of exchanging it for a
4258    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
4259    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
4260    /// of stalling.
4261    ///
4262    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
4263    /// and then starting and completing a `Sync`, it's then safe to send the
4264    /// `BufferCollectionToken` client ends to other participants knowing the
4265    /// server will recognize the tokens when they're sent by the other
4266    /// participants to sysmem in a
4267    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
4268    /// efficient way to create tokens while avoiding unnecessary round trips.
4269    ///
4270    /// Other options include waiting for each
4271    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
4272    /// individually (using a separate call to `Sync` after each), or calling
4273    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
4274    /// converted to a `BufferCollection` via
4275    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
4276    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
4277    /// the sync step and can create multiple tokens at once.
4278    Sync { responder: BufferCollectionSyncResponder },
4279    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
4280    ///
4281    /// Normally a participant will convert a `BufferCollectionToken` into a
4282    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
4283    /// `Release` via the token (and then close the channel immediately or
4284    /// shortly later in response to server closing the server end), which
4285    /// avoids causing buffer collection failure. Without a prior `Release`,
4286    /// closing the `BufferCollectionToken` client end will cause buffer
4287    /// collection failure.
4288    ///
4289    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
4290    ///
4291    /// By default the server handles unexpected closure of a
4292    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
4293    /// first) by failing the buffer collection. Partly this is to expedite
4294    /// closing VMO handles to reclaim memory when any participant fails. If a
4295    /// participant would like to cleanly close a `BufferCollection` without
4296    /// causing buffer collection failure, the participant can send `Release`
4297    /// before closing the `BufferCollection` client end. The `Release` can
4298    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
4299    /// buffer collection won't require constraints from this node in order to
4300    /// allocate. If after `SetConstraints`, the constraints are retained and
4301    /// aggregated, despite the lack of `BufferCollection` connection at the
4302    /// time of constraints aggregation.
4303    ///
4304    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
4305    ///
4306    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
4307    /// end (without `Release` first) will trigger failure of the buffer
4308    /// collection. To close a `BufferCollectionTokenGroup` channel without
4309    /// failing the buffer collection, ensure that AllChildrenPresent() has been
4310    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
4311    /// client end.
4312    ///
4313    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
4315    /// buffer collection will fail (triggered by reception of `Release` without
4316    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
4317    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
4318    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
4319    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
4320    /// close requires `AllChildrenPresent` (if not already sent), then
4321    /// `Release`, then close client end.
4322    ///
4323    /// If `Release` occurs after `AllChildrenPresent`, the children and all
4324    /// their constraints remain intact (just as they would if the
4325    /// `BufferCollectionTokenGroup` channel had remained open), and the client
4326    /// end close doesn't trigger buffer collection failure.
4327    ///
4328    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
4329    ///
4330    /// For brevity, the per-channel-protocol paragraphs above ignore the
4331    /// separate failure domain created by
4332    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
4333    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
4334    /// unexpectedly closes (without `Release` first) and that client end is
4335    /// under a failure domain, instead of failing the whole buffer collection,
4336    /// the failure domain is failed, but the buffer collection itself is
4337    /// isolated from failure of the failure domain. Such failure domains can be
4338    /// nested, in which case only the inner-most failure domain in which the
4339    /// `Node` resides fails.
4340    Release { control_handle: BufferCollectionControlHandle },
4341    /// Set a name for VMOs in this buffer collection.
4342    ///
4343    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
4344    /// will be truncated to fit. The name of the vmo will be suffixed with the
4345    /// buffer index within the collection (if the suffix fits within
4346    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
4347    /// listed in the inspect data.
4348    ///
4349    /// The name only affects VMOs allocated after the name is set; this call
    /// does not rename existing VMOs. If multiple clients set different names,
    /// the name set with the larger `priority` value wins. Setting a new name
    /// with the same priority as a prior name doesn't change the name.
4353    ///
4354    /// All table fields are currently required.
4355    ///
4356    /// + request `priority` The name is only set if this is the first `SetName`
4357    ///   or if `priority` is greater than any previous `priority` value in
4358    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
4359    /// + request `name` The name for VMOs created under this buffer collection.
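    ///
    /// A hand-added sketch (not fidlgen output) of reading this request on the
    /// server side; `request` is illustrative and the `Option`-wrapped fields
    /// follow the usual sysmem2 table convention:
    ///
    /// ```ignore
    /// if let Some((payload, _control_handle)) = request.into_set_name() {
    ///     if let (Some(priority), Some(name)) = (payload.priority, payload.name) {
    ///         // Applies only to VMOs allocated later, and only if `priority`
    ///         // beats any previously supplied priority.
    ///         println!("requested VMO name {name:?} with priority {priority}");
    ///     }
    /// }
    /// ```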
4360    SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionControlHandle },
4361    /// Set information about the current client that can be used by sysmem to
4362    /// help diagnose leaking memory and allocation stalls waiting for a
4363    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
4364    ///
4365    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
4367    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
4368    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
4369    ///
4370    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
4371    /// `Allocator` is the most efficient way to ensure that all
4372    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
4373    /// set, and is also more efficient than separately sending the same debug
4374    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
4375    /// created [`fuchsia.sysmem2/Node`].
4376    ///
4377    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
4378    /// indicate which client is closing their channel first, leading to subtree
4379    /// failure (which can be normal if the purpose of the subtree is over, but
4380    /// if happening earlier than expected, the client-channel-specific name can
4381    /// help diagnose where the failure is first coming from, from sysmem's
4382    /// point of view).
4383    ///
4384    /// All table fields are currently required.
4385    ///
4386    /// + request `name` This can be an arbitrary string, but the current
4387    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
4388    /// + request `id` This can be an arbitrary id, but the current process ID
4389    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
4390    SetDebugClientInfo {
4391        payload: NodeSetDebugClientInfoRequest,
4392        control_handle: BufferCollectionControlHandle,
4393    },
4394    /// Sysmem logs a warning if sysmem hasn't seen
4395    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
4396    /// within 5 seconds after creation of a new collection.
4397    ///
4398    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
    /// take effect.
4401    ///
4402    /// In most cases the default works well.
4403    ///
4404    /// All table fields are currently required.
4405    ///
4406    /// + request `deadline` The time at which sysmem will start trying to log
4407    ///   the warning, unless all constraints are with sysmem by then.
4408    SetDebugTimeoutLogDeadline {
4409        payload: NodeSetDebugTimeoutLogDeadlineRequest,
4410        control_handle: BufferCollectionControlHandle,
4411    },
4412    /// This enables verbose logging for the buffer collection.
4413    ///
4414    /// Verbose logging includes constraints set via
4415    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
4416    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
4417    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
4418    /// the tree of `Node`(s).
4419    ///
4420    /// Normally sysmem prints only a single line complaint when aggregation
4421    /// fails, with just the specific detailed reason that aggregation failed,
4422    /// with little surrounding context.  While this is often enough to diagnose
4423    /// a problem if only a small change was made and everything was working
4424    /// before the small change, it's often not particularly helpful for getting
4425    /// a new buffer collection to work for the first time.  Especially with
4426    /// more complex trees of nodes, involving things like
4427    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
4428    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
4429    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
4430    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
4431    /// looks like and why it's failing a logical allocation, or why a tree or
4432    /// subtree is failing sooner than expected.
4433    ///
4434    /// The intent of the extra logging is to be acceptable from a performance
4435    /// point of view, under the assumption that verbose logging is only enabled
4436    /// on a low number of buffer collections. If we're not tracking down a bug,
4437    /// we shouldn't send this message.
4438    SetVerboseLogging { control_handle: BufferCollectionControlHandle },
4439    /// This gets a handle that can be used as a parameter to
4440    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
4441    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
4442    /// client obtained this handle from this `Node`.
4443    ///
4444    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
4445    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
4446    /// despite the two calls typically being on different channels.
4447    ///
4448    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
4449    ///
4450    /// All table fields are currently required.
4451    ///
4452    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
4453    ///   different `Node` channel, to prove that the client obtained the handle
4454    ///   from this `Node`.
4455    GetNodeRef { responder: BufferCollectionGetNodeRefResponder },
4456    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
4457    /// rooted at a different child token of a common parent
4458    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
4459    /// passed-in `node_ref`.
4460    ///
4461    /// This call is for assisting with admission control de-duplication, and
4462    /// with debugging.
4463    ///
4464    /// The `node_ref` must be obtained using
4465    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
4466    ///
4467    /// The `node_ref` can be a duplicated handle; it's not necessary to call
4468    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
4469    ///
4470    /// If a calling token may not actually be a valid token at all due to a
4471    /// potentially hostile/untrusted provider of the token, call
4472    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
4473    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
4474    /// never responds due to a calling token not being a real token (not really
4475    /// talking to sysmem).  Another option is to call
4476    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
4477    /// which also validates the token along with converting it to a
4478    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
4479    ///
4480    /// All table fields are currently required.
4481    ///
4482    /// - response `is_alternate`
4483    ///   - true: The first parent node in common between the calling node and
4484    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
4485    ///     that the calling `Node` and the `node_ref` `Node` will not have both
4486    ///     their constraints apply - rather sysmem will choose one or the other
4487    ///     of the constraints - never both.  This is because only one child of
4488    ///     a `BufferCollectionTokenGroup` is selected during logical
4489    ///     allocation, with only that one child's subtree contributing to
4490    ///     constraints aggregation.
4491    ///   - false: The first parent node in common between the calling `Node`
4492    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
4493    ///     Currently, this means the first parent node in common is a
    ///     `BufferCollectionToken` or `BufferCollection` (whether or not
    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
4496    ///     `Node` may have both their constraints apply during constraints
4497    ///     aggregation of the logical allocation, if both `Node`(s) are
4498    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
4499    ///     this case, there is no `BufferCollectionTokenGroup` that will
4500    ///     directly prevent the two `Node`(s) from both being selected and
4501    ///     their constraints both aggregated, but even when false, one or both
4502    ///     `Node`(s) may still be eliminated from consideration if one or both
4503    ///     `Node`(s) has a direct or indirect parent
4504    ///     `BufferCollectionTokenGroup` which selects a child subtree other
4505    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
4506    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
4507    ///   associated with the same buffer collection as the calling `Node`.
4508    ///   Another reason for this error is if the `node_ref` is an
    ///   [`zx.Handle:EVENT`] handle with sufficient rights, but isn't actually
4510    ///   a real `node_ref` obtained from `GetNodeRef`.
4511    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
4513    ///   the needed rights expected on a real `node_ref`.
4514    /// * No other failing status codes are returned by this call.  However,
4515    ///   sysmem may add additional codes in future, so the client should have
4516    ///   sensible default handling for any failing status code.
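    ///
    /// A hand-added sketch (not fidlgen output) of replying from a server such
    /// as a test double of sysmem; `request` and `is_alternate_for_node_ref`
    /// are illustrative, and the response table is assumed to derive `Default`:
    ///
    /// ```ignore
    /// if let Some((payload, responder)) = request.into_is_alternate_for() {
    ///     let response = NodeIsAlternateForResponse {
    ///         is_alternate: Some(is_alternate_for_node_ref(payload.node_ref)),
    ///         ..Default::default()
    ///     };
    ///     responder.send(Ok(&response))?;
    /// }
    /// ```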
4517    IsAlternateFor {
4518        payload: NodeIsAlternateForRequest,
4519        responder: BufferCollectionIsAlternateForResponder,
4520    },
4521    /// Get the buffer collection ID. This ID is also available from
4522    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
4523    /// within the collection).
4524    ///
4525    /// This call is mainly useful in situations where we can't convey a
4526    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
4527    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
4528    /// handle, which can be joined back up with a `BufferCollection` client end
4529    /// that was created via a different path. Prefer to convey a
4530    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
4531    ///
4532    /// Trusting a `buffer_collection_id` value from a source other than sysmem
4533    /// is analogous to trusting a koid value from a source other than zircon.
4534    /// Both should be avoided unless really necessary, and both require
4535    /// caution. In some situations it may be reasonable to refer to a
4536    /// pre-established `BufferCollection` by `buffer_collection_id` via a
4537    /// protocol for efficiency reasons, but an incoming value purporting to be
4538    /// a `buffer_collection_id` is not sufficient alone to justify granting the
4539    /// sender of the `buffer_collection_id` any capability. The sender must
4540    /// first prove to a receiver that the sender has/had a VMO or has/had a
4541    /// `BufferCollectionToken` to the same collection by sending a handle that
4542    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
4543    /// `buffer_collection_id` value. The receiver should take care to avoid
4544    /// assuming that a sender had a `BufferCollectionToken` in cases where the
4545    /// sender has only proven that the sender had a VMO.
4546    ///
4547    /// - response `buffer_collection_id` This ID is unique per buffer
4548    ///   collection per boot. Each buffer is uniquely identified by the
4549    ///   `buffer_collection_id` and `buffer_index` together.
4550    GetBufferCollectionId { responder: BufferCollectionGetBufferCollectionIdResponder },
4551    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
4552    /// created after this message to weak, which means that a client's `Node`
4553    /// client end (or a child created after this message) is not alone
4554    /// sufficient to keep allocated VMOs alive.
4555    ///
4556    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
4557    /// `close_weak_asap`.
4558    ///
4559    /// This message is only permitted before the `Node` becomes ready for
4560    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
4561    ///   * `BufferCollectionToken`: any time
4562    ///   * `BufferCollection`: before `SetConstraints`
4563    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
4564    ///
4565    /// Currently, no conversion from strong `Node` to weak `Node` after ready
4566    /// for allocation is provided, but a client can simulate that by creating
4567    /// an additional `Node` before allocation and setting that additional
4568    /// `Node` to weak, and then potentially at some point later sending
4569    /// `Release` and closing the client end of the client's strong `Node`, but
4570    /// keeping the client's weak `Node`.
4571    ///
4572    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
4573    /// collection failure (all `Node` client end(s) will see
4574    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
4575    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
4576    /// this situation until all `Node`(s) are ready for allocation. For initial
4577    /// allocation to succeed, at least one strong `Node` is required to exist
    /// at allocation time, but once that client has received VMO handles, that
4579    /// client can `BufferCollection.Release` and close the client end without
4580    /// causing this type of failure.
4581    ///
4582    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
4583    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
4584    /// separately as appropriate.
4585    SetWeak { control_handle: BufferCollectionControlHandle },
4586    /// This indicates to sysmem that the client is prepared to pay attention to
4587    /// `close_weak_asap`.
4588    ///
4589    /// If sent, this message must be before
4590    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
4591    ///
4592    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
4593    /// send this message before `WaitForAllBuffersAllocated`, or a parent
4594    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
4595    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
4596    /// trigger buffer collection failure.
4597    ///
4598    /// This message is necessary because weak sysmem VMOs have not always been
4599    /// a thing, so older clients are not aware of the need to pay attention to
4600    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
4601    /// sysmem weak VMO handles asap. By having this message and requiring
4602    /// participants to indicate their acceptance of this aspect of the overall
4603    /// protocol, we avoid situations where an older client is delivered a weak
4604    /// VMO without any way for sysmem to get that VMO to close quickly later
4605    /// (and on a per-buffer basis).
4606    ///
4607    /// A participant that doesn't handle `close_weak_asap` and also doesn't
4608    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
4609    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
4610    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
4611    /// same participant has a child/delegate which does retrieve VMOs, that
4612    /// child/delegate will need to send `SetWeakOk` before
4613    /// `WaitForAllBuffersAllocated`.
4614    ///
4615    /// + request `for_child_nodes_also` If present and true, this means direct
4616    ///   child nodes of this node created after this message plus all
4617    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
4618    ///   those nodes. Any child node of this node that was created before this
4619    ///   message is not included. This setting is "sticky" in the sense that a
4620    ///   subsequent `SetWeakOk` without this bool set to true does not reset
4621    ///   the server-side bool. If this creates a problem for a participant, a
4622    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
4623    ///   tokens instead, as appropriate. A participant should only set
4624    ///   `for_child_nodes_also` true if the participant can really promise to
4625    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
4626    ///   weak VMO handles held by participants holding the corresponding child
    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
    ///   which are using sysmem(1) can be weak, despite the clients of those
    ///   sysmem(1) `Node`(s) not having any direct way to `SetWeakOk` or any
    ///   direct way to find out about `close_weak_asap`. This only applies to
    ///   descendants of this `Node` which are using sysmem(1), not to this
4632    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
4633    ///   token, which will fail allocation unless an ancestor of this `Node`
4634    ///   specified `for_child_nodes_also` true.
4635    SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionControlHandle },
    /// The `server_end` will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
4638    /// reservation by a different `Node` via
4639    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
4640    ///
4641    /// The `Node` buffer counts may not be released until the entire tree of
4642    /// `Node`(s) is closed or failed, because
4643    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
4644    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
4645    /// `Node` buffer counts remain reserved until the orphaned node is later
4646    /// cleaned up.
4647    ///
4648    /// If the `Node` exceeds a fairly large number of attached eventpair server
    /// ends, a log message will indicate this and the `Node` (and the
    /// appropriate sub-tree) will fail.
4651    ///
4652    /// The `server_end` will remain open when
4653    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
4654    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
4655    /// [`fuchsia.sysmem2/BufferCollection`].
4656    ///
4657    /// This message can also be used with a
4658    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
4659    AttachNodeTracking {
4660        payload: NodeAttachNodeTrackingRequest,
4661        control_handle: BufferCollectionControlHandle,
4662    },
4663    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
4664    /// collection.
4665    ///
4666    /// A participant may only call
4667    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
4668    /// [`fuchsia.sysmem2/BufferCollection`].
4669    ///
4670    /// For buffer allocation to be attempted, all holders of a
4671    /// `BufferCollection` client end need to call `SetConstraints` before
4672    /// sysmem will attempt to allocate buffers.
4673    ///
4674    /// + request `constraints` These are the constraints on the buffer
4675    ///   collection imposed by the sending client/participant.  The
4676    ///   `constraints` field is not required to be set. If not set, the client
4677    ///   is not setting any actual constraints, but is indicating that the
4678    ///   client has no constraints to set. A client that doesn't set the
4679    ///   `constraints` field won't receive any VMO handles, but can still find
4680    ///   out how many buffers were allocated and can still refer to buffers by
4681    ///   their `buffer_index`.
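    ///
    /// A hand-added sketch (not fidlgen output) of how a server might observe
    /// this request; `request` is illustrative:
    ///
    /// ```ignore
    /// if let Some((payload, _control_handle)) = request.into_set_constraints() {
    ///     match payload.constraints {
    ///         Some(constraints) => { /* aggregate these constraints */ }
    ///         None => { /* this participant explicitly has no constraints */ }
    ///     }
    /// }
    /// ```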
4682    SetConstraints {
4683        payload: BufferCollectionSetConstraintsRequest,
4684        control_handle: BufferCollectionControlHandle,
4685    },
4686    /// Wait until all buffers are allocated.
4687    ///
4688    /// This FIDL call completes when buffers have been allocated, or completes
4689    /// with some failure detail if allocation has been attempted but failed.
4690    ///
4691    /// The following must occur before buffers will be allocated:
4692    ///   * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
4693    ///     collection must be turned in via `BindSharedCollection` to get a
4694    ///     [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
4695    ///     [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
4696    ///     or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
4697    ///     to them.
4698    ///   * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
4699    ///     must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
4700    ///     sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
4701    ///     sent to them.
4702    ///
4703    /// - result `buffer_collection_info` The VMO handles and other related
4704    ///   info.
4705    /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
4706    ///   cannot be fulfilled due to resource exhaustion.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The request is
4708    ///   malformed.
    /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY]` The
4710    ///   request is valid but cannot be satisfied, perhaps due to hardware
4711    ///   limitations. This can happen if participants have incompatible
4712    ///   constraints (empty intersection, roughly speaking). See the log for
4713    ///   more info. In cases where a participant could potentially be treated
4714    ///   as optional, see [`BufferCollectionTokenGroup`]. When using
4715    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
4716    ///   error code if there aren't enough buffers in the pre-existing
4717    ///   collection to satisfy the constraints set on the attached token and
4718    ///   any sub-tree of tokens derived from the attached token.
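    ///
    /// A hand-added client-side sketch (not fidlgen output), assuming the
    /// generated `BufferCollectionProxy` method of the same snake_case name
    /// and a `collection` proxy obtained via `Allocator.BindSharedCollection`:
    ///
    /// ```ignore
    /// match collection.wait_for_all_buffers_allocated().await? {
    ///     Ok(response) => {
    ///         // `response.buffer_collection_info` carries the VMO handles.
    ///     }
    ///     Err(Error::ConstraintsIntersectionEmpty) => { /* incompatible constraints */ }
    ///     Err(_other) => { /* handle other (including future) error codes */ }
    /// }
    /// ```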
4719    WaitForAllBuffersAllocated { responder: BufferCollectionWaitForAllBuffersAllocatedResponder },
4720    /// Checks whether all the buffers have been allocated, in a polling
4721    /// fashion.
4722    ///
4723    /// * If the buffer collection has been allocated, returns success.
4724    /// * If the buffer collection failed allocation, returns the same
4725    ///   [`fuchsia.sysmem2/Error`] as
4726    ///   [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
4727    ///   return.
4728    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
4729    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
4730    ///   would not respond quickly.
4731    CheckAllBuffersAllocated { responder: BufferCollectionCheckAllBuffersAllocatedResponder },
4732    /// Create a new token to add a new participant to an existing logical
4733    /// buffer collection, if the existing collection's buffer counts,
4734    /// constraints, and participants allow.
4735    ///
4736    /// This can be useful in replacing a failed participant, and/or in
4737    /// adding/re-adding a participant after buffers have already been
4738    /// allocated.
4739    ///
4740    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
4741    /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
4742    /// goes through the normal procedure of setting constraints or closing
4743    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
4744    /// clients' point of view, despite the possibility that all the buffers
4745    /// were actually allocated previously. This process is called "logical
4746    /// allocation". Most instances of "allocation" in docs for other messages
4747    /// can also be read as "allocation or logical allocation" while remaining
4748    /// valid, but we just say "allocation" in most places for brevity/clarity
4749    /// of explanation, with the details of "logical allocation" left for the
4750    /// docs here on `AttachToken`.
4751    ///
4752    /// Failure of an attached `Node` does not propagate to the parent of the
4753    /// attached `Node`. More generally, failure of a child `Node` is blocked
4754    /// from reaching its parent `Node` if the child is attached, or if the
4755    /// child is dispensable and the failure occurred after logical allocation
4756    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
4757    ///
4758    /// A participant may in some scenarios choose to initially use a
4759    /// dispensable token for a given instance of a delegate participant, and
4760    /// then later if the first instance of that delegate participant fails, a
    /// new second instance of that delegate participant may be given a token
4762    /// created with `AttachToken`.
4763    ///
4764    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
4765    /// client end, the token acts like any other token. The client can
4766    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
4767    /// and can send the token to a different process/participant. The
4768    /// `BufferCollectionToken` `Node` should be converted to a
4769    /// `BufferCollection` `Node` as normal by sending
4770    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
4771    /// without causing subtree failure by sending
4772    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
4773    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
4774    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
4775    /// the `BufferCollection`.
4776    ///
4777    /// Within the subtree, a success result from
4778    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
4779    /// the subtree participants' constraints were satisfiable using the
4780    /// already-existing buffer collection, the already-established
4781    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
4782    /// constraints, and the already-existing other participants (already added
4783    /// via successful logical allocation) and their specified buffer counts in
4784    /// their constraints. A failure result means the new participants'
4785    /// constraints cannot be satisfied using the existing buffer collection and
4786    /// its already-added participants. Creating a new collection instead may
4787    /// allow all participants' constraints to be satisfied, assuming
4788    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
4789    /// used.
4790    ///
4791    /// A token created with `AttachToken` performs constraints aggregation with
4792    /// all constraints currently in effect on the buffer collection, plus the
4793    /// attached token under consideration plus child tokens under the attached
4794    /// token which are not themselves an attached token or under such a token.
4795    /// Further subtrees under this subtree are considered for logical
4796    /// allocation only after this subtree has completed logical allocation.
4797    ///
4798    /// Assignment of existing buffers to participants'
4799    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
    /// etc. is first-come, first-served, but a child can't logically allocate
4801    /// before all its parents have sent `SetConstraints`.
4802    ///
4803    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
4804    /// in contrast to `AttachToken`, has the created token `Node` + child
4805    /// `Node`(s) (in the created subtree but not in any subtree under this
4806    /// subtree) participate in constraints aggregation along with its parent
4807    /// during the parent's allocation or logical allocation.
4808    ///
4809    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
4810    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
4811    /// sysmem before the new token can be passed to `BindSharedCollection`. The
4812    /// `Sync` of the new token can be accomplished with
4813    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
4814    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
4815    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
4816    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
4817    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
4818    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
4819    /// created token, to also sync those additional tokens to sysmem using a
4820    /// single round-trip.
4821    ///
4822    /// All table fields are currently required.
4823    ///
    /// + request `rights_attenuation_mask` This allows attenuating the VMO
4825    ///   rights of the subtree. These values for `rights_attenuation_mask`
4826    ///   result in no attenuation (note that 0 is not on this list):
4827    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
4828    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
4829    /// + request `token_request` The server end of the `BufferCollectionToken`
4830    ///   channel. The client retains the client end.
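    ///
    /// A hand-added client-side sketch (not fidlgen output), assuming the
    /// generated `BufferCollectionProxy::attach_token` method, the current
    /// `fidl::endpoints::create_endpoints` signature, and that the request
    /// table derives `Default`:
    ///
    /// ```ignore
    /// let (attached_token, token_server_end) =
    ///     fidl::endpoints::create_endpoints::<BufferCollectionTokenMarker>();
    /// collection.attach_token(BufferCollectionAttachTokenRequest {
    ///     // ZX_RIGHT_SAME_RIGHTS means no attenuation of VMO rights.
    ///     rights_attenuation_mask: Some(fidl::Rights::SAME_RIGHTS),
    ///     token_request: Some(token_server_end),
    ///     ..Default::default()
    /// })?;
    /// // Sync (e.g. `collection.sync().await?`) before handing
    /// // `attached_token` to the new participant.
    /// ```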
4831    AttachToken {
4832        payload: BufferCollectionAttachTokenRequest,
4833        control_handle: BufferCollectionControlHandle,
4834    },
4835    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
4836    /// buffers have been allocated and only the specified number of buffers (or
4837    /// fewer) remain in the buffer collection.
4838    ///
4839    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
4840    /// client to wait until an old buffer collection is fully or mostly
4841    /// deallocated before attempting allocation of a new buffer collection. The
4842    /// eventpair is only signalled when the buffers of this collection have
4843    /// been fully deallocated (not just un-referenced by clients, but all the
4844    /// memory consumed by those buffers has been fully reclaimed/recycled), or
4845    /// when allocation or logical allocation fails for the tree or subtree
4846    /// including this [`fuchsia.sysmem2/BufferCollection`].
4847    ///
4848    /// The eventpair won't be signalled until allocation or logical allocation
4849    /// has completed; until then, the collection's current buffer count is
4850    /// ignored.
4851    ///
4852    /// If logical allocation fails for an attached subtree (using
4853    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
4854    /// eventpair will close during that failure regardless of the number of
    /// buffers potentially allocated in the overall buffer collection. This is
4856    /// for logical allocation consistency with normal allocation.
4857    ///
4858    /// The lifetime signalled by this event includes asynchronous cleanup of
4859    /// allocated buffers, and this asynchronous cleanup cannot occur until all
4860    /// holders of VMO handles to the buffers have closed those VMO handles.
4861    /// Therefore, clients should take care not to become blocked forever
4862    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
4863    /// participants using the logical buffer collection (including the waiter
4864    /// itself) are less trusted, less reliable, or potentially blocked by the
4865    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
4866    /// for the client wait may be prudent, depending on details of how the
4867    /// collection and/or its VMOs are used or shared. Failure to allocate a
4868    /// new/replacement buffer collection is better than getting stuck forever.
4869    ///
4870    /// The sysmem server itself intentionally does not perform any waiting on
4871    /// already-failed collections' VMOs to finish cleaning up before attempting
4872    /// a new allocation, and the sysmem server intentionally doesn't retry
4873    /// allocation if a new allocation fails due to out of memory, even if that
4874    /// failure is potentially due to continued existence of an old collection's
4875    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
4876    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
4877    /// as long as the waiting client is careful to not create a deadlock.
4878    ///
4879    /// Continued existence of old collections that are still cleaning up is not
4880    /// the only reason that a new allocation may fail due to insufficient
4881    /// memory, even if the new allocation is allocating physically contiguous
4882    /// buffers. Overall system memory pressure can also be the cause of failure
4883    /// to allocate a new collection. See also
4884    /// [`fuchsia.memorypressure/Provider`].
4885    ///
4886    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
4887    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
4888    /// `eventpair` handle (server end) can be sent via more than one
4889    /// `AttachLifetimeTracking` message to different protocols, and the
4890    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
4891    /// the conditions are met (all holders of duplicates have closed their
    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
4893    /// client end can (also) be duplicated without preventing the
4894    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
4895    ///
4896    /// The server intentionally doesn't "trust" any signals set on the
4897    /// `server_end`. This mechanism intentionally uses only
4898    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
4899    /// "early", and is only set when all handles to the server end eventpair
4900    /// are closed. No meaning is associated with any of the other signals, and
4901    /// clients should ignore any other signal bits on either end of the
4902    /// `eventpair`.
4903    ///
4904    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
4905    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
4906    /// transfer without causing `BufferCollection` channel failure).
4907    ///
4908    /// All table fields are currently required.
4909    ///
4910    /// + request `server_end` This eventpair handle will be closed by the
    ///   sysmem server when buffers have been allocated initially and the
    ///   number of buffers not yet fully deallocated is then less than or
    ///   equal to `buffers_remaining`.
4913    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
4914    ///   fewer) buffers to be fully deallocated. A number greater than zero can
4915    ///   be useful in situations where a known number of buffers are
4916    ///   intentionally not closed so that the data can continue to be used,
4917    ///   such as for keeping the last available video frame displayed in the UI
4918    ///   even if the video stream was using protected output buffers. It's
4919    ///   outside the scope of the `BufferCollection` interface (at least for
4920    ///   now) to determine how many buffers may be held without closing, but
4921    ///   it'll typically be in the range 0-2.
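    ///
    /// A hand-added client-side sketch (not fidlgen output), assuming the
    /// generated `BufferCollectionProxy::attach_lifetime_tracking` method, the
    /// current `zx::EventPair::create` signature, and that the request table
    /// derives `Default`:
    ///
    /// ```ignore
    /// let (lifetime_client, lifetime_server) = zx::EventPair::create();
    /// collection.attach_lifetime_tracking(BufferCollectionAttachLifetimeTrackingRequest {
    ///     server_end: Some(lifetime_server),
    ///     buffers_remaining: Some(0),
    ///     ..Default::default()
    /// })?;
    /// // Later, wait asynchronously for ZX_EVENTPAIR_PEER_CLOSED on
    /// // `lifetime_client` before allocating the replacement collection.
    /// ```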
4922    AttachLifetimeTracking {
4923        payload: BufferCollectionAttachLifetimeTrackingRequest,
4924        control_handle: BufferCollectionControlHandle,
4925    },
4926    /// An interaction was received which does not match any known method.
4927    #[non_exhaustive]
4928    _UnknownMethod {
4929        /// Ordinal of the method that was called.
4930        ordinal: u64,
4931        control_handle: BufferCollectionControlHandle,
4932        method_type: fidl::MethodType,
4933    },
4934}
4935
4936impl BufferCollectionRequest {
4937    #[allow(irrefutable_let_patterns)]
4938    pub fn into_sync(self) -> Option<(BufferCollectionSyncResponder)> {
4939        if let BufferCollectionRequest::Sync { responder } = self {
4940            Some((responder))
4941        } else {
4942            None
4943        }
4944    }
4945
4946    #[allow(irrefutable_let_patterns)]
4947    pub fn into_release(self) -> Option<(BufferCollectionControlHandle)> {
4948        if let BufferCollectionRequest::Release { control_handle } = self {
4949            Some((control_handle))
4950        } else {
4951            None
4952        }
4953    }
4954
4955    #[allow(irrefutable_let_patterns)]
4956    pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionControlHandle)> {
4957        if let BufferCollectionRequest::SetName { payload, control_handle } = self {
4958            Some((payload, control_handle))
4959        } else {
4960            None
4961        }
4962    }
4963
4964    #[allow(irrefutable_let_patterns)]
4965    pub fn into_set_debug_client_info(
4966        self,
4967    ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionControlHandle)> {
4968        if let BufferCollectionRequest::SetDebugClientInfo { payload, control_handle } = self {
4969            Some((payload, control_handle))
4970        } else {
4971            None
4972        }
4973    }
4974
4975    #[allow(irrefutable_let_patterns)]
4976    pub fn into_set_debug_timeout_log_deadline(
4977        self,
4978    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionControlHandle)> {
4979        if let BufferCollectionRequest::SetDebugTimeoutLogDeadline { payload, control_handle } =
4980            self
4981        {
4982            Some((payload, control_handle))
4983        } else {
4984            None
4985        }
4986    }
4987
4988    #[allow(irrefutable_let_patterns)]
4989    pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionControlHandle)> {
4990        if let BufferCollectionRequest::SetVerboseLogging { control_handle } = self {
4991            Some((control_handle))
4992        } else {
4993            None
4994        }
4995    }
4996
4997    #[allow(irrefutable_let_patterns)]
4998    pub fn into_get_node_ref(self) -> Option<(BufferCollectionGetNodeRefResponder)> {
4999        if let BufferCollectionRequest::GetNodeRef { responder } = self {
5000            Some((responder))
5001        } else {
5002            None
5003        }
5004    }
5005
5006    #[allow(irrefutable_let_patterns)]
5007    pub fn into_is_alternate_for(
5008        self,
5009    ) -> Option<(NodeIsAlternateForRequest, BufferCollectionIsAlternateForResponder)> {
5010        if let BufferCollectionRequest::IsAlternateFor { payload, responder } = self {
5011            Some((payload, responder))
5012        } else {
5013            None
5014        }
5015    }
5016
5017    #[allow(irrefutable_let_patterns)]
5018    pub fn into_get_buffer_collection_id(
5019        self,
5020    ) -> Option<(BufferCollectionGetBufferCollectionIdResponder)> {
5021        if let BufferCollectionRequest::GetBufferCollectionId { responder } = self {
5022            Some((responder))
5023        } else {
5024            None
5025        }
5026    }
5027
5028    #[allow(irrefutable_let_patterns)]
5029    pub fn into_set_weak(self) -> Option<(BufferCollectionControlHandle)> {
5030        if let BufferCollectionRequest::SetWeak { control_handle } = self {
5031            Some((control_handle))
5032        } else {
5033            None
5034        }
5035    }
5036
5037    #[allow(irrefutable_let_patterns)]
5038    pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, BufferCollectionControlHandle)> {
5039        if let BufferCollectionRequest::SetWeakOk { payload, control_handle } = self {
5040            Some((payload, control_handle))
5041        } else {
5042            None
5043        }
5044    }
5045
5046    #[allow(irrefutable_let_patterns)]
5047    pub fn into_attach_node_tracking(
5048        self,
5049    ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionControlHandle)> {
5050        if let BufferCollectionRequest::AttachNodeTracking { payload, control_handle } = self {
5051            Some((payload, control_handle))
5052        } else {
5053            None
5054        }
5055    }
5056
5057    #[allow(irrefutable_let_patterns)]
5058    pub fn into_set_constraints(
5059        self,
5060    ) -> Option<(BufferCollectionSetConstraintsRequest, BufferCollectionControlHandle)> {
5061        if let BufferCollectionRequest::SetConstraints { payload, control_handle } = self {
5062            Some((payload, control_handle))
5063        } else {
5064            None
5065        }
5066    }
5067
5068    #[allow(irrefutable_let_patterns)]
5069    pub fn into_wait_for_all_buffers_allocated(
5070        self,
5071    ) -> Option<(BufferCollectionWaitForAllBuffersAllocatedResponder)> {
5072        if let BufferCollectionRequest::WaitForAllBuffersAllocated { responder } = self {
5073            Some((responder))
5074        } else {
5075            None
5076        }
5077    }
5078
5079    #[allow(irrefutable_let_patterns)]
5080    pub fn into_check_all_buffers_allocated(
5081        self,
5082    ) -> Option<(BufferCollectionCheckAllBuffersAllocatedResponder)> {
5083        if let BufferCollectionRequest::CheckAllBuffersAllocated { responder } = self {
5084            Some((responder))
5085        } else {
5086            None
5087        }
5088    }
5089
5090    #[allow(irrefutable_let_patterns)]
5091    pub fn into_attach_token(
5092        self,
5093    ) -> Option<(BufferCollectionAttachTokenRequest, BufferCollectionControlHandle)> {
5094        if let BufferCollectionRequest::AttachToken { payload, control_handle } = self {
5095            Some((payload, control_handle))
5096        } else {
5097            None
5098        }
5099    }
5100
5101    #[allow(irrefutable_let_patterns)]
5102    pub fn into_attach_lifetime_tracking(
5103        self,
5104    ) -> Option<(BufferCollectionAttachLifetimeTrackingRequest, BufferCollectionControlHandle)>
5105    {
5106        if let BufferCollectionRequest::AttachLifetimeTracking { payload, control_handle } = self {
5107            Some((payload, control_handle))
5108        } else {
5109            None
5110        }
5111    }
5112
5113    /// Name of the method defined in FIDL
5114    pub fn method_name(&self) -> &'static str {
5115        match *self {
5116            BufferCollectionRequest::Sync { .. } => "sync",
5117            BufferCollectionRequest::Release { .. } => "release",
5118            BufferCollectionRequest::SetName { .. } => "set_name",
5119            BufferCollectionRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
5120            BufferCollectionRequest::SetDebugTimeoutLogDeadline { .. } => {
5121                "set_debug_timeout_log_deadline"
5122            }
5123            BufferCollectionRequest::SetVerboseLogging { .. } => "set_verbose_logging",
5124            BufferCollectionRequest::GetNodeRef { .. } => "get_node_ref",
5125            BufferCollectionRequest::IsAlternateFor { .. } => "is_alternate_for",
5126            BufferCollectionRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
5127            BufferCollectionRequest::SetWeak { .. } => "set_weak",
5128            BufferCollectionRequest::SetWeakOk { .. } => "set_weak_ok",
5129            BufferCollectionRequest::AttachNodeTracking { .. } => "attach_node_tracking",
5130            BufferCollectionRequest::SetConstraints { .. } => "set_constraints",
5131            BufferCollectionRequest::WaitForAllBuffersAllocated { .. } => {
5132                "wait_for_all_buffers_allocated"
5133            }
5134            BufferCollectionRequest::CheckAllBuffersAllocated { .. } => {
5135                "check_all_buffers_allocated"
5136            }
5137            BufferCollectionRequest::AttachToken { .. } => "attach_token",
5138            BufferCollectionRequest::AttachLifetimeTracking { .. } => "attach_lifetime_tracking",
5139            BufferCollectionRequest::_UnknownMethod {
5140                method_type: fidl::MethodType::OneWay,
5141                ..
5142            } => "unknown one-way method",
5143            BufferCollectionRequest::_UnknownMethod {
5144                method_type: fidl::MethodType::TwoWay,
5145                ..
5146            } => "unknown two-way method",
5147        }
5148    }
5149}
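
// Hand-added example (not produced by fidlgen): a minimal sketch of
// dispatching `BufferCollectionRequest`s with the converters and responders
// defined in this file. Dropping a two-way responder without replying shuts
// the channel down (see the responder `Drop` impls below), so a real server
// should answer every two-way request it intends to keep serving.
#[allow(dead_code)]
fn handle_buffer_collection_request(
    request: BufferCollectionRequest,
) -> Result<(), fidl::Error> {
    match request {
        // `Sync` just needs its (empty) reply to complete the round trip.
        BufferCollectionRequest::Sync { responder } => responder.send(),
        // `Release` is a clean close of this connection.
        BufferCollectionRequest::Release { control_handle } => {
            control_handle.shutdown();
            Ok(())
        }
        // Flexible methods unknown to this server surface here instead of the
        // bindings closing the channel automatically.
        BufferCollectionRequest::_UnknownMethod { .. } => Ok(()),
        // The remaining requests are ignored in this sketch; dropping their
        // responders (if any) shuts the channel down.
        _ => Ok(()),
    }
}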
5150
5151#[derive(Debug, Clone)]
5152pub struct BufferCollectionControlHandle {
5153    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
5154}
5155
5156impl fidl::endpoints::ControlHandle for BufferCollectionControlHandle {
5157    fn shutdown(&self) {
5158        self.inner.shutdown()
5159    }
5160    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
5161        self.inner.shutdown_with_epitaph(status)
5162    }
5163
5164    fn is_closed(&self) -> bool {
5165        self.inner.channel().is_closed()
5166    }
5167    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
5168        self.inner.channel().on_closed()
5169    }
5170
5171    #[cfg(target_os = "fuchsia")]
5172    fn signal_peer(
5173        &self,
5174        clear_mask: zx::Signals,
5175        set_mask: zx::Signals,
5176    ) -> Result<(), zx_status::Status> {
5177        use fidl::Peered;
5178        self.inner.channel().signal_peer(clear_mask, set_mask)
5179    }
5180}
5181
5182impl BufferCollectionControlHandle {}
5183
5184#[must_use = "FIDL methods require a response to be sent"]
5185#[derive(Debug)]
5186pub struct BufferCollectionSyncResponder {
5187    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
5188    tx_id: u32,
5189}
5190
/// Set the channel to be shut down (see [`BufferCollectionControlHandle::shutdown`])
5192/// if the responder is dropped without sending a response, so that the client
5193/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
5194impl std::ops::Drop for BufferCollectionSyncResponder {
5195    fn drop(&mut self) {
5196        self.control_handle.shutdown();
5197        // Safety: drops once, never accessed again
5198        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
5199    }
5200}
5201
5202impl fidl::endpoints::Responder for BufferCollectionSyncResponder {
5203    type ControlHandle = BufferCollectionControlHandle;
5204
5205    fn control_handle(&self) -> &BufferCollectionControlHandle {
5206        &self.control_handle
5207    }
5208
5209    fn drop_without_shutdown(mut self) {
5210        // Safety: drops once, never accessed again due to mem::forget
5211        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
5212        // Prevent Drop from running (which would shut down the channel)
5213        std::mem::forget(self);
5214    }
5215}
5216
5217impl BufferCollectionSyncResponder {
5218    /// Sends a response to the FIDL transaction.
5219    ///
5220    /// Sets the channel to shutdown if an error occurs.
5221    pub fn send(self) -> Result<(), fidl::Error> {
5222        let _result = self.send_raw();
5223        if _result.is_err() {
5224            self.control_handle.shutdown();
5225        }
5226        self.drop_without_shutdown();
5227        _result
5228    }
5229
5230    /// Similar to "send" but does not shutdown the channel if an error occurs.
5231    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
5232        let _result = self.send_raw();
5233        self.drop_without_shutdown();
5234        _result
5235    }
5236
5237    fn send_raw(&self) -> Result<(), fidl::Error> {
5238        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
5239            fidl::encoding::Flexible::new(()),
5240            self.tx_id,
5241            0x11ac2555cf575b54,
5242            fidl::encoding::DynamicFlags::FLEXIBLE,
5243        )
5244    }
5245}
5246
5247#[must_use = "FIDL methods require a response to be sent"]
5248#[derive(Debug)]
5249pub struct BufferCollectionGetNodeRefResponder {
5250    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
5251    tx_id: u32,
5252}
5253
/// Set the channel to be shut down (see [`BufferCollectionControlHandle::shutdown`])
5255/// if the responder is dropped without sending a response, so that the client
5256/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
5257impl std::ops::Drop for BufferCollectionGetNodeRefResponder {
5258    fn drop(&mut self) {
5259        self.control_handle.shutdown();
5260        // Safety: drops once, never accessed again
5261        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
5262    }
5263}
5264
5265impl fidl::endpoints::Responder for BufferCollectionGetNodeRefResponder {
5266    type ControlHandle = BufferCollectionControlHandle;
5267
5268    fn control_handle(&self) -> &BufferCollectionControlHandle {
5269        &self.control_handle
5270    }
5271
5272    fn drop_without_shutdown(mut self) {
5273        // Safety: drops once, never accessed again due to mem::forget
5274        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
5275        // Prevent Drop from running (which would shut down the channel)
5276        std::mem::forget(self);
5277    }
5278}
5279
5280impl BufferCollectionGetNodeRefResponder {
5281    /// Sends a response to the FIDL transaction.
5282    ///
5283    /// Sets the channel to shutdown if an error occurs.
5284    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5285        let _result = self.send_raw(payload);
5286        if _result.is_err() {
5287            self.control_handle.shutdown();
5288        }
5289        self.drop_without_shutdown();
5290        _result
5291    }
5292
5293    /// Similar to "send" but does not shutdown the channel if an error occurs.
5294    pub fn send_no_shutdown_on_err(
5295        self,
5296        mut payload: NodeGetNodeRefResponse,
5297    ) -> Result<(), fidl::Error> {
5298        let _result = self.send_raw(payload);
5299        self.drop_without_shutdown();
5300        _result
5301    }
5302
5303    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5304        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
5305            fidl::encoding::Flexible::new(&mut payload),
5306            self.tx_id,
5307            0x5b3d0e51614df053,
5308            fidl::encoding::DynamicFlags::FLEXIBLE,
5309        )
5310    }
5311}
5312
5313#[must_use = "FIDL methods require a response to be sent"]
5314#[derive(Debug)]
5315pub struct BufferCollectionIsAlternateForResponder {
5316    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
5317    tx_id: u32,
5318}
5319
/// Set the channel to be shut down (see [`BufferCollectionControlHandle::shutdown`])
5321/// if the responder is dropped without sending a response, so that the client
5322/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
5323impl std::ops::Drop for BufferCollectionIsAlternateForResponder {
5324    fn drop(&mut self) {
5325        self.control_handle.shutdown();
5326        // Safety: drops once, never accessed again
5327        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
5328    }
5329}
5330
5331impl fidl::endpoints::Responder for BufferCollectionIsAlternateForResponder {
5332    type ControlHandle = BufferCollectionControlHandle;
5333
5334    fn control_handle(&self) -> &BufferCollectionControlHandle {
5335        &self.control_handle
5336    }
5337
5338    fn drop_without_shutdown(mut self) {
5339        // Safety: drops once, never accessed again due to mem::forget
5340        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
5341        // Prevent Drop from running (which would shut down the channel)
5342        std::mem::forget(self);
5343    }
5344}
5345
5346impl BufferCollectionIsAlternateForResponder {
5347    /// Sends a response to the FIDL transaction.
5348    ///
5349    /// Sets the channel to shutdown if an error occurs.
5350    pub fn send(
5351        self,
5352        mut result: Result<&NodeIsAlternateForResponse, Error>,
5353    ) -> Result<(), fidl::Error> {
5354        let _result = self.send_raw(result);
5355        if _result.is_err() {
5356            self.control_handle.shutdown();
5357        }
5358        self.drop_without_shutdown();
5359        _result
5360    }
5361
5362    /// Similar to "send" but does not shutdown the channel if an error occurs.
5363    pub fn send_no_shutdown_on_err(
5364        self,
5365        mut result: Result<&NodeIsAlternateForResponse, Error>,
5366    ) -> Result<(), fidl::Error> {
5367        let _result = self.send_raw(result);
5368        self.drop_without_shutdown();
5369        _result
5370    }
5371
5372    fn send_raw(
5373        &self,
5374        mut result: Result<&NodeIsAlternateForResponse, Error>,
5375    ) -> Result<(), fidl::Error> {
5376        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5377            NodeIsAlternateForResponse,
5378            Error,
5379        >>(
5380            fidl::encoding::FlexibleResult::new(result),
5381            self.tx_id,
5382            0x3a58e00157e0825,
5383            fidl::encoding::DynamicFlags::FLEXIBLE,
5384        )
5385    }
5386}
5387
5388#[must_use = "FIDL methods require a response to be sent"]
5389#[derive(Debug)]
5390pub struct BufferCollectionGetBufferCollectionIdResponder {
5391    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
5392    tx_id: u32,
5393}
5394
5395/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
5396/// if the responder is dropped without sending a response, so that the client
5397/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
5398impl std::ops::Drop for BufferCollectionGetBufferCollectionIdResponder {
5399    fn drop(&mut self) {
5400        self.control_handle.shutdown();
5401        // Safety: drops once, never accessed again
5402        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
5403    }
5404}
5405
5406impl fidl::endpoints::Responder for BufferCollectionGetBufferCollectionIdResponder {
5407    type ControlHandle = BufferCollectionControlHandle;
5408
5409    fn control_handle(&self) -> &BufferCollectionControlHandle {
5410        &self.control_handle
5411    }
5412
5413    fn drop_without_shutdown(mut self) {
5414        // Safety: drops once, never accessed again due to mem::forget
5415        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
5416        // Prevent Drop from running (which would shut down the channel)
5417        std::mem::forget(self);
5418    }
5419}
5420
5421impl BufferCollectionGetBufferCollectionIdResponder {
5422    /// Sends a response to the FIDL transaction.
5423    ///
5424    /// Sets the channel to shutdown if an error occurs.
5425    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
5426        let _result = self.send_raw(payload);
5427        if _result.is_err() {
5428            self.control_handle.shutdown();
5429        }
5430        self.drop_without_shutdown();
5431        _result
5432    }
5433
5434    /// Similar to "send" but does not shutdown the channel if an error occurs.
5435    pub fn send_no_shutdown_on_err(
5436        self,
5437        mut payload: &NodeGetBufferCollectionIdResponse,
5438    ) -> Result<(), fidl::Error> {
5439        let _result = self.send_raw(payload);
5440        self.drop_without_shutdown();
5441        _result
5442    }
5443
5444    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
5445        self.control_handle
5446            .inner
5447            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
5448                fidl::encoding::Flexible::new(payload),
5449                self.tx_id,
5450                0x77d19a494b78ba8c,
5451                fidl::encoding::DynamicFlags::FLEXIBLE,
5452            )
5453    }
5454}
5455
5456#[must_use = "FIDL methods require a response to be sent"]
5457#[derive(Debug)]
5458pub struct BufferCollectionWaitForAllBuffersAllocatedResponder {
5459    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
5460    tx_id: u32,
5461}
5462
5463/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
5464/// if the responder is dropped without sending a response, so that the client
5465/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
5466impl std::ops::Drop for BufferCollectionWaitForAllBuffersAllocatedResponder {
5467    fn drop(&mut self) {
5468        self.control_handle.shutdown();
5469        // Safety: drops once, never accessed again
5470        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
5471    }
5472}
5473
5474impl fidl::endpoints::Responder for BufferCollectionWaitForAllBuffersAllocatedResponder {
5475    type ControlHandle = BufferCollectionControlHandle;
5476
5477    fn control_handle(&self) -> &BufferCollectionControlHandle {
5478        &self.control_handle
5479    }
5480
5481    fn drop_without_shutdown(mut self) {
5482        // Safety: drops once, never accessed again due to mem::forget
5483        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
5484        // Prevent Drop from running (which would shut down the channel)
5485        std::mem::forget(self);
5486    }
5487}
5488
5489impl BufferCollectionWaitForAllBuffersAllocatedResponder {
5490    /// Sends a response to the FIDL transaction.
5491    ///
5492    /// Sets the channel to shutdown if an error occurs.
5493    pub fn send(
5494        self,
5495        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5496    ) -> Result<(), fidl::Error> {
5497        let _result = self.send_raw(result);
5498        if _result.is_err() {
5499            self.control_handle.shutdown();
5500        }
5501        self.drop_without_shutdown();
5502        _result
5503    }
5504
5505    /// Similar to "send" but does not shutdown the channel if an error occurs.
5506    pub fn send_no_shutdown_on_err(
5507        self,
5508        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5509    ) -> Result<(), fidl::Error> {
5510        let _result = self.send_raw(result);
5511        self.drop_without_shutdown();
5512        _result
5513    }
5514
5515    fn send_raw(
5516        &self,
5517        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5518    ) -> Result<(), fidl::Error> {
5519        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5520            BufferCollectionWaitForAllBuffersAllocatedResponse,
5521            Error,
5522        >>(
5523            fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
5524            self.tx_id,
5525            0x62300344b61404e,
5526            fidl::encoding::DynamicFlags::FLEXIBLE,
5527        )
5528    }
5529}
5530
5531#[must_use = "FIDL methods require a response to be sent"]
5532#[derive(Debug)]
5533pub struct BufferCollectionCheckAllBuffersAllocatedResponder {
5534    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
5535    tx_id: u32,
5536}
5537
5538/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
5539/// if the responder is dropped without sending a response, so that the client
5540/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
5541impl std::ops::Drop for BufferCollectionCheckAllBuffersAllocatedResponder {
5542    fn drop(&mut self) {
5543        self.control_handle.shutdown();
5544        // Safety: drops once, never accessed again
5545        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
5546    }
5547}
5548
5549impl fidl::endpoints::Responder for BufferCollectionCheckAllBuffersAllocatedResponder {
5550    type ControlHandle = BufferCollectionControlHandle;
5551
5552    fn control_handle(&self) -> &BufferCollectionControlHandle {
5553        &self.control_handle
5554    }
5555
5556    fn drop_without_shutdown(mut self) {
5557        // Safety: drops once, never accessed again due to mem::forget
5558        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
5559        // Prevent Drop from running (which would shut down the channel)
5560        std::mem::forget(self);
5561    }
5562}
5563
5564impl BufferCollectionCheckAllBuffersAllocatedResponder {
5565    /// Sends a response to the FIDL transaction.
5566    ///
5567    /// Sets the channel to shutdown if an error occurs.
5568    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5569        let _result = self.send_raw(result);
5570        if _result.is_err() {
5571            self.control_handle.shutdown();
5572        }
5573        self.drop_without_shutdown();
5574        _result
5575    }
5576
5577    /// Similar to "send" but does not shutdown the channel if an error occurs.
5578    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5579        let _result = self.send_raw(result);
5580        self.drop_without_shutdown();
5581        _result
5582    }
5583
5584    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5585        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5586            fidl::encoding::EmptyStruct,
5587            Error,
5588        >>(
5589            fidl::encoding::FlexibleResult::new(result),
5590            self.tx_id,
5591            0x35a5fe77ce939c10,
5592            fidl::encoding::DynamicFlags::FLEXIBLE,
5593        )
5594    }
5595}
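
// Server-side sketch (hand-written, not fidlgen output) for a result-carrying
// responder: the payload is a `Result` whose error side is the sysmem `Error`
// enum, and sending consumes the responder. `sysmem_error` is a hypothetical
// `Error` value.
//
// ```ignore
// // Report success for CheckAllBuffersAllocated:
// responder.send(Ok(()))?;
// // ...or, on a fresh responder, report a sysmem error without shutting the
// // channel down if the send itself fails:
// // responder.send_no_shutdown_on_err(Err(sysmem_error))?;
// ```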
5596
5597#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
5598pub struct BufferCollectionTokenMarker;
5599
5600impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenMarker {
5601    type Proxy = BufferCollectionTokenProxy;
5602    type RequestStream = BufferCollectionTokenRequestStream;
5603    #[cfg(target_os = "fuchsia")]
5604    type SynchronousProxy = BufferCollectionTokenSynchronousProxy;
5605
5606    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionToken";
5607}
5608
5609pub trait BufferCollectionTokenProxyInterface: Send + Sync {
5610    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
5611    fn r#sync(&self) -> Self::SyncResponseFut;
5612    fn r#release(&self) -> Result<(), fidl::Error>;
5613    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
5614    fn r#set_debug_client_info(
5615        &self,
5616        payload: &NodeSetDebugClientInfoRequest,
5617    ) -> Result<(), fidl::Error>;
5618    fn r#set_debug_timeout_log_deadline(
5619        &self,
5620        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
5621    ) -> Result<(), fidl::Error>;
5622    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
5623    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
5624        + Send;
5625    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
5626    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
5627        + Send;
5628    fn r#is_alternate_for(
5629        &self,
5630        payload: NodeIsAlternateForRequest,
5631    ) -> Self::IsAlternateForResponseFut;
5632    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
5633        + Send;
5634    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
5635    fn r#set_weak(&self) -> Result<(), fidl::Error>;
5636    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
5637    fn r#attach_node_tracking(
5638        &self,
5639        payload: NodeAttachNodeTrackingRequest,
5640    ) -> Result<(), fidl::Error>;
5641    type DuplicateSyncResponseFut: std::future::Future<
5642            Output = Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error>,
5643        > + Send;
5644    fn r#duplicate_sync(
5645        &self,
5646        payload: &BufferCollectionTokenDuplicateSyncRequest,
5647    ) -> Self::DuplicateSyncResponseFut;
5648    fn r#duplicate(
5649        &self,
5650        payload: BufferCollectionTokenDuplicateRequest,
5651    ) -> Result<(), fidl::Error>;
5652    fn r#set_dispensable(&self) -> Result<(), fidl::Error>;
5653    fn r#create_buffer_collection_token_group(
5654        &self,
5655        payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
5656    ) -> Result<(), fidl::Error>;
5657}
5658#[derive(Debug)]
5659#[cfg(target_os = "fuchsia")]
5660pub struct BufferCollectionTokenSynchronousProxy {
5661    client: fidl::client::sync::Client,
5662}
5663
5664#[cfg(target_os = "fuchsia")]
5665impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenSynchronousProxy {
5666    type Proxy = BufferCollectionTokenProxy;
5667    type Protocol = BufferCollectionTokenMarker;
5668
5669    fn from_channel(inner: fidl::Channel) -> Self {
5670        Self::new(inner)
5671    }
5672
5673    fn into_channel(self) -> fidl::Channel {
5674        self.client.into_channel()
5675    }
5676
5677    fn as_channel(&self) -> &fidl::Channel {
5678        self.client.as_channel()
5679    }
5680}
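
// Construction sketch (hand-written, not fidlgen output): a
// `BufferCollectionTokenSynchronousProxy` is typically built from a
// `ClientEnd<BufferCollectionTokenMarker>` received from another participant
// (for example, a client end produced by `Duplicate`), assuming code running
// on Fuchsia:
//
// ```ignore
// let token = BufferCollectionTokenSynchronousProxy::new(client_end.into_channel());
// ```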
5681
5682#[cfg(target_os = "fuchsia")]
5683impl BufferCollectionTokenSynchronousProxy {
5684    pub fn new(channel: fidl::Channel) -> Self {
5685        let protocol_name =
5686            <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
5687        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
5688    }
5689
5690    pub fn into_channel(self) -> fidl::Channel {
5691        self.client.into_channel()
5692    }
5693
5694    /// Waits until an event arrives and returns it. It is safe for other
5695    /// threads to make concurrent requests while waiting for an event.
5696    pub fn wait_for_event(
5697        &self,
5698        deadline: zx::MonotonicInstant,
5699    ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
5700        BufferCollectionTokenEvent::decode(self.client.wait_for_event(deadline)?)
5701    }
5702
5703    /// Ensure that previous messages have been received server side. This is
5704    /// particularly useful after previous messages that created new tokens,
5705    /// because a token must be known to the sysmem server before sending the
5706    /// token to another participant.
5707    ///
5708    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
5709    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
5710    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
5711    /// to mitigate the possibility of a hostile/fake
5712    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
5713    /// Another way is to pass the token to
5714    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
5715    /// the token as part of exchanging it for a
5716    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
5717    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
5718    /// of stalling.
5719    ///
5720    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
5721    /// and then starting and completing a `Sync`, it's then safe to send the
5722    /// `BufferCollectionToken` client ends to other participants knowing the
5723    /// server will recognize the tokens when they're sent by the other
5724    /// participants to sysmem in a
5725    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
5726    /// efficient way to create tokens while avoiding unnecessary round trips.
5727    ///
5728    /// Other options include waiting for each
5729    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
5730    /// individually (using a separate call to `Sync` after each), or calling
5731    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
5732    /// converted to a `BufferCollection` via
5733    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
5734    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
5735    /// the sync step and can create multiple tokens at once.
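    ///
    /// A usage sketch (hand-written, not fidlgen output), assuming an
    /// already-connected `BufferCollectionTokenSynchronousProxy` named `token`
    /// and code running on Fuchsia; the request field names follow the
    /// `Duplicate` documentation below:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::{
    ///     BufferCollectionTokenDuplicateRequest, BufferCollectionTokenMarker,
    /// };
    ///
    /// // Create a child token to hand to another participant.
    /// let (child_client, child_server) =
    ///     fidl::endpoints::create_endpoints::<BufferCollectionTokenMarker>();
    /// token.r#duplicate(BufferCollectionTokenDuplicateRequest {
    ///     rights_attenuation_mask: Some(fidl::Rights::SAME_RIGHTS),
    ///     token_request: Some(child_server),
    ///     ..Default::default()
    /// })?;
    /// // Sync so the server knows about the child token before its client end
    /// // is sent to the other participant.
    /// token.r#sync(zx::MonotonicInstant::INFINITE)?;
    /// // `child_client` can now be sent onward safely.
    /// ```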
5736    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
5737        let _response = self.client.send_query::<
5738            fidl::encoding::EmptyPayload,
5739            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
5740        >(
5741            (),
5742            0x11ac2555cf575b54,
5743            fidl::encoding::DynamicFlags::FLEXIBLE,
5744            ___deadline,
5745        )?
5746        .into_result::<BufferCollectionTokenMarker>("sync")?;
5747        Ok(_response)
5748    }
5749
5750    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
5751    ///
5752    /// Normally a participant will convert a `BufferCollectionToken` into a
5753    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
5754    /// `Release` via the token (and then close the channel immediately or
5755    /// shortly later in response to server closing the server end), which
5756    /// avoids causing buffer collection failure. Without a prior `Release`,
5757    /// closing the `BufferCollectionToken` client end will cause buffer
5758    /// collection failure.
5759    ///
5760    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
5761    ///
5762    /// By default the server handles unexpected closure of a
5763    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
5764    /// first) by failing the buffer collection. Partly this is to expedite
5765    /// closing VMO handles to reclaim memory when any participant fails. If a
5766    /// participant would like to cleanly close a `BufferCollection` without
5767    /// causing buffer collection failure, the participant can send `Release`
5768    /// before closing the `BufferCollection` client end. The `Release` can
5769    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
5770    /// buffer collection won't require constraints from this node in order to
5771    /// allocate. If after `SetConstraints`, the constraints are retained and
5772    /// aggregated, despite the lack of `BufferCollection` connection at the
5773    /// time of constraints aggregation.
5774    ///
5775    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
5776    ///
5777    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
5778    /// end (without `Release` first) will trigger failure of the buffer
5779    /// collection. To close a `BufferCollectionTokenGroup` channel without
5780    /// failing the buffer collection, ensure that AllChildrenPresent() has been
5781    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
5782    /// client end.
5783    ///
5784    /// If `Release` occurs before
5785    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
5786    /// buffer collection will fail (triggered by reception of `Release` without
5787    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
5788    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
5789    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
5790    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
5791    /// close requires `AllChildrenPresent` (if not already sent), then
5792    /// `Release`, then close client end.
5793    ///
5794    /// If `Release` occurs after `AllChildrenPresent`, the children and all
5795    /// their constraints remain intact (just as they would if the
5796    /// `BufferCollectionTokenGroup` channel had remained open), and the client
5797    /// end close doesn't trigger buffer collection failure.
5798    ///
5799    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
5800    ///
5801    /// For brevity, the per-channel-protocol paragraphs above ignore the
5802    /// separate failure domain created by
5803    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
5804    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
5805    /// unexpectedly closes (without `Release` first) and that client end is
5806    /// under a failure domain, instead of failing the whole buffer collection,
5807    /// the failure domain is failed, but the buffer collection itself is
5808    /// isolated from failure of the failure domain. Such failure domains can be
5809    /// nested, in which case only the inner-most failure domain in which the
5810    /// `Node` resides fails.
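    ///
    /// A clean-close sketch (hand-written, not fidlgen output) for a token
    /// that will not be converted into a `BufferCollection`, assuming a
    /// connected synchronous proxy named `token`:
    ///
    /// ```ignore
    /// token.r#release()?;
    /// // Closing the client end after `Release` avoids buffer collection failure.
    /// drop(token);
    /// ```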
5811    pub fn r#release(&self) -> Result<(), fidl::Error> {
5812        self.client.send::<fidl::encoding::EmptyPayload>(
5813            (),
5814            0x6a5cae7d6d6e04c6,
5815            fidl::encoding::DynamicFlags::FLEXIBLE,
5816        )
5817    }
5818
5819    /// Set a name for VMOs in this buffer collection.
5820    ///
5821    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
5822    /// will be truncated to fit. The name of the vmo will be suffixed with the
5823    /// buffer index within the collection (if the suffix fits within
5824    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
5825    /// listed in the inspect data.
5826    ///
5827    /// The name only affects VMOs allocated after the name is set; this call
5828    /// does not rename existing VMOs. If multiple clients set different names
5829    /// then the larger priority value will win. Setting a new name with the
5830    /// same priority as a prior name doesn't change the name.
5831    ///
5832    /// All table fields are currently required.
5833    ///
5834    /// + request `priority` The name is only set if this is the first `SetName`
5835    ///   or if `priority` is greater than any previous `priority` value in
5836    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
5837    /// + request `name` The name for VMOs created under this buffer collection.
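    ///
    /// A sketch (hand-written, not fidlgen output), assuming a connected proxy
    /// named `token`; the `priority` and `name` field names follow the request
    /// documentation above:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::NodeSetNameRequest;
    ///
    /// token.r#set_name(&NodeSetNameRequest {
    ///     priority: Some(100),
    ///     name: Some("my-participant-buffers".to_string()),
    ///     ..Default::default()
    /// })?;
    /// ```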
5838    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
5839        self.client.send::<NodeSetNameRequest>(
5840            payload,
5841            0xb41f1624f48c1e9,
5842            fidl::encoding::DynamicFlags::FLEXIBLE,
5843        )
5844    }
5845
5846    /// Set information about the current client that can be used by sysmem to
5847    /// help diagnose leaking memory and allocation stalls waiting for a
5848    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
5849    ///
5850    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
5851    /// `Node`(s) derived from this `Node`, unless overridden by
5852    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
5853    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
5854    ///
5855    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
5856    /// `Allocator` is the most efficient way to ensure that all
5857    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
5858    /// set, and is also more efficient than separately sending the same debug
5859    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
5860    /// created [`fuchsia.sysmem2/Node`].
5861    ///
5862    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
5863    /// indicate which client is closing their channel first, leading to subtree
5864    /// failure (which can be normal if the purpose of the subtree is over, but
5865    /// if happening earlier than expected, the client-channel-specific name can
5866    /// help diagnose where the failure is first coming from, from sysmem's
5867    /// point of view).
5868    ///
5869    /// All table fields are currently required.
5870    ///
5871    /// + request `name` This can be an arbitrary string, but the current
5872    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
5873    /// + request `id` This can be an arbitrary id, but the current process ID
5874    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
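    ///
    /// A sketch (hand-written, not fidlgen output), assuming a connected proxy
    /// named `token`; `current_process_koid` is a hypothetical `u64` koid
    /// value, and the field names follow the request documentation above:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::NodeSetDebugClientInfoRequest;
    ///
    /// token.r#set_debug_client_info(&NodeSetDebugClientInfoRequest {
    ///     name: Some("my_component".to_string()),
    ///     id: Some(current_process_koid),
    ///     ..Default::default()
    /// })?;
    /// ```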
5875    pub fn r#set_debug_client_info(
5876        &self,
5877        mut payload: &NodeSetDebugClientInfoRequest,
5878    ) -> Result<(), fidl::Error> {
5879        self.client.send::<NodeSetDebugClientInfoRequest>(
5880            payload,
5881            0x5cde8914608d99b1,
5882            fidl::encoding::DynamicFlags::FLEXIBLE,
5883        )
5884    }
5885
5886    /// Sysmem logs a warning if sysmem hasn't seen
5887    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
5888    /// within 5 seconds after creation of a new collection.
5889    ///
5890    /// Clients can call this method to change when the log is printed. If
5891    /// multiple clients set the deadline, it's unspecified which deadline will
5892    /// take effect.
5893    ///
5894    /// In most cases the default works well.
5895    ///
5896    /// All table fields are currently required.
5897    ///
5898    /// + request `deadline` The time at which sysmem will start trying to log
5899    ///   the warning, unless all constraints are with sysmem by then.
5900    pub fn r#set_debug_timeout_log_deadline(
5901        &self,
5902        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
5903    ) -> Result<(), fidl::Error> {
5904        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
5905            payload,
5906            0x716b0af13d5c0806,
5907            fidl::encoding::DynamicFlags::FLEXIBLE,
5908        )
5909    }
5910
5911    /// This enables verbose logging for the buffer collection.
5912    ///
5913    /// Verbose logging includes constraints set via
5914    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
5915    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
5916    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
5917    /// the tree of `Node`(s).
5918    ///
5919    /// Normally sysmem prints only a single line complaint when aggregation
5920    /// fails, with just the specific detailed reason that aggregation failed,
5921    /// with little surrounding context.  While this is often enough to diagnose
5922    /// a problem if only a small change was made and everything was working
5923    /// before the small change, it's often not particularly helpful for getting
5924    /// a new buffer collection to work for the first time.  Especially with
5925    /// more complex trees of nodes, involving things like
5926    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
5927    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
5928    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
5929    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
5930    /// looks like and why it's failing a logical allocation, or why a tree or
5931    /// subtree is failing sooner than expected.
5932    ///
5933    /// The intent of the extra logging is to be acceptable from a performance
5934    /// point of view, under the assumption that verbose logging is only enabled
5935    /// on a low number of buffer collections. If we're not tracking down a bug,
5936    /// we shouldn't send this message.
5937    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
5938        self.client.send::<fidl::encoding::EmptyPayload>(
5939            (),
5940            0x5209c77415b4dfad,
5941            fidl::encoding::DynamicFlags::FLEXIBLE,
5942        )
5943    }
5944
5945    /// This gets a handle that can be used as a parameter to
5946    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
5947    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
5948    /// client obtained this handle from this `Node`.
5949    ///
5950    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
5951    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
5952    /// despite the two calls typically being on different channels.
5953    ///
5954    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
5955    ///
5956    /// All table fields are currently required.
5957    ///
5958    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
5959    ///   different `Node` channel, to prove that the client obtained the handle
5960    ///   from this `Node`.
5961    pub fn r#get_node_ref(
5962        &self,
5963        ___deadline: zx::MonotonicInstant,
5964    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
5965        let _response = self.client.send_query::<
5966            fidl::encoding::EmptyPayload,
5967            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
5968        >(
5969            (),
5970            0x5b3d0e51614df053,
5971            fidl::encoding::DynamicFlags::FLEXIBLE,
5972            ___deadline,
5973        )?
5974        .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
5975        Ok(_response)
5976    }
5977
5978    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
5979    /// rooted at a different child token of a common parent
5980    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
5981    /// passed-in `node_ref`.
5982    ///
5983    /// This call is for assisting with admission control de-duplication, and
5984    /// with debugging.
5985    ///
5986    /// The `node_ref` must be obtained using
5987    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
5988    ///
5989    /// The `node_ref` can be a duplicated handle; it's not necessary to call
5990    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
5991    ///
5992    /// If a calling token may not actually be a valid token at all due to a
5993    /// potentially hostile/untrusted provider of the token, call
5994    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
5995    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
5996    /// never responds due to a calling token not being a real token (not really
5997    /// talking to sysmem).  Another option is to call
5998    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
5999    /// which also validates the token along with converting it to a
6000    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
6001    ///
6002    /// All table fields are currently required.
6003    ///
6004    /// - response `is_alternate`
6005    ///   - true: The first parent node in common between the calling node and
6006    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
6007    ///     that the calling `Node` and the `node_ref` `Node` will not have both
6008    ///     their constraints apply - rather sysmem will choose one or the other
6009    ///     of the constraints - never both.  This is because only one child of
6010    ///     a `BufferCollectionTokenGroup` is selected during logical
6011    ///     allocation, with only that one child's subtree contributing to
6012    ///     constraints aggregation.
6013    ///   - false: The first parent node in common between the calling `Node`
6014    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
6015    ///     Currently, this means the first parent node in common is a
6016    ///     `BufferCollectionToken` or `BufferCollection` (regardless of whether
6017    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
6018    ///     `Node` may have both their constraints apply during constraints
6019    ///     aggregation of the logical allocation, if both `Node`(s) are
6020    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
6021    ///     this case, there is no `BufferCollectionTokenGroup` that will
6022    ///     directly prevent the two `Node`(s) from both being selected and
6023    ///     their constraints both aggregated, but even when false, one or both
6024    ///     `Node`(s) may still be eliminated from consideration if one or both
6025    ///     `Node`(s) has a direct or indirect parent
6026    ///     `BufferCollectionTokenGroup` which selects a child subtree other
6027    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
6028    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
6029    ///   associated with the same buffer collection as the calling `Node`.
6030    ///   Another reason for this error is if the `node_ref` is an
6031    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
6032    ///   a real `node_ref` obtained from `GetNodeRef`.
6033    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
6034    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
6035    ///   the needed rights expected on a real `node_ref`.
6036    /// * No other failing status codes are returned by this call.  However,
6037    ///   sysmem may add additional codes in future, so the client should have
6038    ///   sensible default handling for any failing status code.
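    ///
    /// A sketch (hand-written, not fidlgen output) combining `GetNodeRef` on
    /// one `Node` with `IsAlternateFor` on another; `token` and `other_node`
    /// are assumed connected synchronous proxies, and the `node_ref` and
    /// `is_alternate` field names follow the documentation above:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::NodeIsAlternateForRequest;
    ///
    /// let node_ref = other_node
    ///     .r#get_node_ref(zx::MonotonicInstant::INFINITE)?
    ///     .node_ref
    ///     .unwrap();
    /// match token.r#is_alternate_for(
    ///     NodeIsAlternateForRequest { node_ref: Some(node_ref), ..Default::default() },
    ///     zx::MonotonicInstant::INFINITE,
    /// )? {
    ///     Ok(response) => {
    ///         let _is_alternate = response.is_alternate.unwrap_or(false);
    ///     }
    ///     Err(_sysmem_error) => { /* e.g. NOT_FOUND or PROTOCOL_DEVIATION */ }
    /// }
    /// ```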
6039    pub fn r#is_alternate_for(
6040        &self,
6041        mut payload: NodeIsAlternateForRequest,
6042        ___deadline: zx::MonotonicInstant,
6043    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
6044        let _response = self.client.send_query::<
6045            NodeIsAlternateForRequest,
6046            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
6047        >(
6048            &mut payload,
6049            0x3a58e00157e0825,
6050            fidl::encoding::DynamicFlags::FLEXIBLE,
6051            ___deadline,
6052        )?
6053        .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
6054        Ok(_response.map(|x| x))
6055    }
6056
6057    /// Get the buffer collection ID. This ID is also available from
6058    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
6059    /// within the collection).
6060    ///
6061    /// This call is mainly useful in situations where we can't convey a
6062    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
6063    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
6064    /// handle, which can be joined back up with a `BufferCollection` client end
6065    /// that was created via a different path. Prefer to convey a
6066    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
6067    ///
6068    /// Trusting a `buffer_collection_id` value from a source other than sysmem
6069    /// is analogous to trusting a koid value from a source other than zircon.
6070    /// Both should be avoided unless really necessary, and both require
6071    /// caution. In some situations it may be reasonable to refer to a
6072    /// pre-established `BufferCollection` by `buffer_collection_id` via a
6073    /// protocol for efficiency reasons, but an incoming value purporting to be
6074    /// a `buffer_collection_id` is not sufficient alone to justify granting the
6075    /// sender of the `buffer_collection_id` any capability. The sender must
6076    /// first prove to a receiver that the sender has/had a VMO or has/had a
6077    /// `BufferCollectionToken` to the same collection by sending a handle that
6078    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
6079    /// `buffer_collection_id` value. The receiver should take care to avoid
6080    /// assuming that a sender had a `BufferCollectionToken` in cases where the
6081    /// sender has only proven that the sender had a VMO.
6082    ///
6083    /// - response `buffer_collection_id` This ID is unique per buffer
6084    ///   collection per boot. Each buffer is uniquely identified by the
6085    ///   `buffer_collection_id` and `buffer_index` together.
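    ///
    /// A sketch (hand-written, not fidlgen output), assuming a connected proxy
    /// named `token`:
    ///
    /// ```ignore
    /// let buffer_collection_id = token
    ///     .r#get_buffer_collection_id(zx::MonotonicInstant::INFINITE)?
    ///     .buffer_collection_id
    ///     .unwrap();
    /// ```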
6086    pub fn r#get_buffer_collection_id(
6087        &self,
6088        ___deadline: zx::MonotonicInstant,
6089    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
6090        let _response = self.client.send_query::<
6091            fidl::encoding::EmptyPayload,
6092            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
6093        >(
6094            (),
6095            0x77d19a494b78ba8c,
6096            fidl::encoding::DynamicFlags::FLEXIBLE,
6097            ___deadline,
6098        )?
6099        .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
6100        Ok(_response)
6101    }
6102
6103    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
6104    /// created after this message to weak, which means that a client's `Node`
6105    /// client end (or a child created after this message) is not alone
6106    /// sufficient to keep allocated VMOs alive.
6107    ///
6108    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
6109    /// `close_weak_asap`.
6110    ///
6111    /// This message is only permitted before the `Node` becomes ready for
6112    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
6113    ///   * `BufferCollectionToken`: any time
6114    ///   * `BufferCollection`: before `SetConstraints`
6115    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
6116    ///
6117    /// Currently, no conversion from strong `Node` to weak `Node` after ready
6118    /// for allocation is provided, but a client can simulate that by creating
6119    /// an additional `Node` before allocation and setting that additional
6120    /// `Node` to weak, and then potentially at some point later sending
6121    /// `Release` and closing the client end of the client's strong `Node`, but
6122    /// keeping the client's weak `Node`.
6123    ///
6124    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
6125    /// collection failure (all `Node` client end(s) will see
6126    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
6127    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
6128    /// this situation until all `Node`(s) are ready for allocation. For initial
6129    /// allocation to succeed, at least one strong `Node` is required to exist
6130    /// at allocation time, but after that client receives VMO handles, that
6131    /// client can `BufferCollection.Release` and close the client end without
6132    /// causing this type of failure.
6133    ///
6134    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
6135    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
6136    /// separately as appropriate.
6137    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
6138        self.client.send::<fidl::encoding::EmptyPayload>(
6139            (),
6140            0x22dd3ea514eeffe1,
6141            fidl::encoding::DynamicFlags::FLEXIBLE,
6142        )
6143    }
6144
6145    /// This indicates to sysmem that the client is prepared to pay attention to
6146    /// `close_weak_asap`.
6147    ///
6148    /// If sent, this message must be before
6149    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
6150    ///
6151    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
6152    /// send this message before `WaitForAllBuffersAllocated`, or a parent
6153    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
6154    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
6155    /// trigger buffer collection failure.
6156    ///
6157    /// This message is necessary because weak sysmem VMOs have not always been
6158    /// a thing, so older clients are not aware of the need to pay attention to
6159    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
6160    /// sysmem weak VMO handles asap. By having this message and requiring
6161    /// participants to indicate their acceptance of this aspect of the overall
6162    /// protocol, we avoid situations where an older client is delivered a weak
6163    /// VMO without any way for sysmem to get that VMO to close quickly later
6164    /// (and on a per-buffer basis).
6165    ///
6166    /// A participant that doesn't handle `close_weak_asap` and also doesn't
6167    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
6168    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
6169    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
6170    /// same participant has a child/delegate which does retrieve VMOs, that
6171    /// child/delegate will need to send `SetWeakOk` before
6172    /// `WaitForAllBuffersAllocated`.
6173    ///
6174    /// + request `for_child_nodes_also` If present and true, this means direct
6175    ///   child nodes of this node created after this message plus all
6176    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
6177    ///   those nodes. Any child node of this node that was created before this
6178    ///   message is not included. This setting is "sticky" in the sense that a
6179    ///   subsequent `SetWeakOk` without this bool set to true does not reset
6180    ///   the server-side bool. If this creates a problem for a participant, a
6181    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
6182    ///   tokens instead, as appropriate. A participant should only set
6183    ///   `for_child_nodes_also` true if the participant can really promise to
6184    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
6185    ///   weak VMO handles held by participants holding the corresponding child
6186    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
6187    ///   which are using sysmem(1) can be weak, despite the clients of those
6188    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
6189    ///   direct way to find out about `close_weak_asap`. This only applies to
6190    ///   descendants of this `Node` which are using sysmem(1), not to this
6191    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
6192    ///   token, which will fail allocation unless an ancestor of this `Node`
6193    ///   specified `for_child_nodes_also` true.
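    ///
    /// A sketch (hand-written, not fidlgen output), assuming a connected proxy
    /// named `token`; the `for_child_nodes_also` field name follows the
    /// request documentation above:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::NodeSetWeakOkRequest;
    ///
    /// token.r#set_weak_ok(NodeSetWeakOkRequest {
    ///     for_child_nodes_also: Some(true),
    ///     ..Default::default()
    /// })?;
    /// ```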
6194    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
6195        self.client.send::<NodeSetWeakOkRequest>(
6196            &mut payload,
6197            0x38a44fc4d7724be9,
6198            fidl::encoding::DynamicFlags::FLEXIBLE,
6199        )
6200    }
6201
6202    /// The server_end will be closed after this `Node` and any child nodes
6203    /// have released their buffer counts, making those counts available for
6204    /// reservation by a different `Node` via
6205    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
6206    ///
6207    /// The `Node` buffer counts may not be released until the entire tree of
6208    /// `Node`(s) is closed or failed, because
6209    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
6210    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
6211    /// `Node` buffer counts remain reserved until the orphaned node is later
6212    /// cleaned up.
6213    ///
6214    /// If the `Node` exceeds a fairly large number of attached eventpair server
6215    /// ends, a log message will indicate this and the `Node` (and the
6216    /// appropriate sub-tree) will fail.
6217    ///
6218    /// The `server_end` will remain open when
6219    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
6220    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
6221    /// [`fuchsia.sysmem2/BufferCollection`].
6222    ///
6223    /// This message can also be used with a
6224    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
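    ///
    /// A sketch (hand-written, not fidlgen output), assuming a connected proxy
    /// named `token`; the `server_end` field name follows the description
    /// above, and the eventpair creation call reflects the current zx crate
    /// API:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::NodeAttachNodeTrackingRequest;
    ///
    /// let (tracking_client, tracking_server) = zx::EventPair::create();
    /// token.r#attach_node_tracking(NodeAttachNodeTrackingRequest {
    ///     server_end: Some(tracking_server),
    ///     ..Default::default()
    /// })?;
    /// // Watch for ZX_EVENTPAIR_PEER_CLOSED on `tracking_client` to learn when
    /// // this node's buffer counts have been released.
    /// ```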
6225    pub fn r#attach_node_tracking(
6226        &self,
6227        mut payload: NodeAttachNodeTrackingRequest,
6228    ) -> Result<(), fidl::Error> {
6229        self.client.send::<NodeAttachNodeTrackingRequest>(
6230            &mut payload,
6231            0x3f22f2a293d3cdac,
6232            fidl::encoding::DynamicFlags::FLEXIBLE,
6233        )
6234    }
6235
6236    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
6237    /// one, referring to the same buffer collection.
6238    ///
6239    /// The created tokens are children of this token in the
6240    /// [`fuchsia.sysmem2/Node`] hierarchy.
6241    ///
6242    /// This method can be used to add more participants, by transferring the
6243    /// newly created tokens to additional participants.
6244    ///
6245    /// A new token will be returned for each entry in the
6246    /// `rights_attenuation_masks` array.
6247    ///
6248    /// If the called token may not actually be a valid token due to a
6249    /// potentially hostile/untrusted provider of the token, consider using
6250    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6251    /// instead of potentially getting stuck indefinitely if
6252    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
6253    /// due to the calling token not being a real token.
6254    ///
6255    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
6256    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
6257    /// method, because the sync step is included in this call, at the cost of a
6258    /// round trip during this call.
6259    ///
6260    /// All tokens must be turned in to sysmem via
6261    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6262    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6263    /// successfully allocate buffers (or to logically allocate buffers in the
6264    /// case of subtrees involving
6265    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]).
6266    ///
6267    /// All table fields are currently required.
6268    ///
6269    /// + request `rights_attenuation_mask` In each entry of
6270    ///   `rights_attenuation_masks`, rights bits that are zero will be absent
6271    ///   in the buffer VMO rights obtainable via the corresponding returned
6272    ///   token. This allows an initiator or intermediary participant to
6273    ///   attenuate the rights available to a participant. This does not allow a
6274    ///   participant to gain rights that the participant doesn't already have.
6275    ///   The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
6276    ///   attenuation should be applied.
6277    /// - response `tokens` The client ends of each newly created token.
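    ///
    /// A sketch (hand-written, not fidlgen output), assuming a connected proxy
    /// named `token`; the `rights_attenuation_masks` and `tokens` field names
    /// follow the documentation above:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::BufferCollectionTokenDuplicateSyncRequest;
    ///
    /// let response = token.r#duplicate_sync(
    ///     &BufferCollectionTokenDuplicateSyncRequest {
    ///         // One child token per entry; no attenuation for either.
    ///         rights_attenuation_masks: Some(vec![
    ///             fidl::Rights::SAME_RIGHTS,
    ///             fidl::Rights::SAME_RIGHTS,
    ///         ]),
    ///         ..Default::default()
    ///     },
    ///     zx::MonotonicInstant::INFINITE,
    /// )?;
    /// // No separate `Sync` is needed before sending these onward.
    /// let child_tokens = response.tokens.unwrap_or_default();
    /// ```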
6278    pub fn r#duplicate_sync(
6279        &self,
6280        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
6281        ___deadline: zx::MonotonicInstant,
6282    ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
6283        let _response = self.client.send_query::<
6284            BufferCollectionTokenDuplicateSyncRequest,
6285            fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
6286        >(
6287            payload,
6288            0x1c1af9919d1ca45c,
6289            fidl::encoding::DynamicFlags::FLEXIBLE,
6290            ___deadline,
6291        )?
6292        .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
6293        Ok(_response)
6294    }
6295
6296    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
6297    /// one, referring to the same buffer collection.
6298    ///
6299    /// The created token is a child of this token in the
6300    /// [`fuchsia.sysmem2/Node`] hierarchy.
6301    ///
6302    /// This method can be used to add a participant, by transferring the newly
6303    /// created token to another participant.
6304    ///
6305    /// This one-way message can be used instead of the two-way
6306    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
6307    /// performance-sensitive cases where it would be undesirable to wait for
6308    /// sysmem to respond to
6309    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
6310    /// client code isn't structured to make it easy to duplicate all the needed
6311    /// tokens at once.
6312    ///
6313    /// After sending one or more `Duplicate` messages, and before sending the
6314    /// newly created child tokens to other participants (or to other
6315    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
6316    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
6317    /// `Sync` call can be made on the token, or on the `BufferCollection`
6318    /// obtained by passing this token to `BindSharedCollection`.  Either will
6319    /// ensure that the server knows about the tokens created via `Duplicate`
6320    /// before the other participant sends the token to the server via separate
6321    /// `Allocator` channel.
6322    ///
6323    /// All tokens must be turned in via
6324    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6325    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6326    /// successfully allocate buffers.
6327    ///
6328    /// All table fields are currently required.
6329    ///
6330    /// + request `rights_attenuation_mask` The rights bits that are zero in
6331    ///   this mask will be absent in the buffer VMO rights obtainable via the
6332    ///   client end of `token_request`. This allows an initiator or
6333    ///   intermediary participant to attenuate the rights available to a
6334    ///   delegate participant. This does not allow a participant to gain rights
6335    ///   that the participant doesn't already have. The value
6336    ///   `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
6337    ///   should be applied.
6338    ///   + These values for rights_attenuation_mask result in no attenuation:
6339    ///     + `ZX_RIGHT_SAME_RIGHTS` (preferred)
6340    ///     + 0xFFFFFFFF (this is reasonable when an attenuation mask is
6341    ///       computed)
6342    ///     + 0 (deprecated - do not use 0 - an ERROR will go to the log)
6343    /// + request `token_request` is the server end of a `BufferCollectionToken`
6344    ///   channel. The client end of this channel acts as another participant in
6345    ///   the shared buffer collection.
6346    pub fn r#duplicate(
6347        &self,
6348        mut payload: BufferCollectionTokenDuplicateRequest,
6349    ) -> Result<(), fidl::Error> {
6350        self.client.send::<BufferCollectionTokenDuplicateRequest>(
6351            &mut payload,
6352            0x73e78f92ee7fb887,
6353            fidl::encoding::DynamicFlags::FLEXIBLE,
6354        )
6355    }
6356
6357    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
6358    ///
6359    /// When the `BufferCollectionToken` is converted to a
6360    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
6361    /// the `BufferCollection` also.
6362    ///
6363    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
6364    /// client end without having sent
6365    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
6366    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
6367    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
6368    /// to the root `Node`, which fails the whole buffer collection. In
6369    /// contrast, a dispensable `Node` can fail after buffers are allocated
6370    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
6371    /// hierarchy.
6372    ///
6373    /// The dispensable `Node` participates in constraints aggregation along
6374    /// with its parent before buffer allocation. If the dispensable `Node`
6375    /// fails before buffers are allocated, the failure propagates to the
6376    /// dispensable `Node`'s parent.
6377    ///
6378    /// After buffers are allocated, failure of the dispensable `Node` (or any
6379    /// child of the dispensable `Node`) does not propagate to the dispensable
6380    /// `Node`'s parent. Failure does propagate from a normal child of a
6381    /// dispensable `Node` to the dispensable `Node`.  Failure of a child is
6382    /// blocked from reaching its parent if the child is attached using
6383    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
6384    /// dispensable and the failure occurred after allocation.
6385    ///
6386    /// A dispensable `Node` can be used in cases where a participant needs to
6387    /// provide constraints, but after buffers are allocated, the participant
6388    /// can fail without causing buffer collection failure from the parent
6389    /// `Node`'s point of view.
6390    ///
6391    /// In contrast, `BufferCollection.AttachToken` can be used to create a
6392    /// `BufferCollectionToken` which does not participate in constraints
6393    /// aggregation with its parent `Node`, and whose failure at any time does
6394    /// not propagate to its parent `Node`, and whose potential delay providing
6395    /// constraints does not prevent the parent `Node` from completing its
6396    /// buffer allocation.
6397    ///
6398    /// An initiator (creator of the root `Node` using
6399    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
6400    /// scenarios choose to initially use a dispensable `Node` for a first
6401    /// instance of a participant, and then later if the first instance of that
6402    /// participant fails, a new second instance of that participant may be given
6403    /// a `BufferCollectionToken` created with `AttachToken`.
6404    ///
6405    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
6406    /// shortly before sending the dispensable `BufferCollectionToken` to a
6407    /// delegate participant. Because `SetDispensable` prevents propagation of
6408    /// child `Node` failure to parent `Node`(s), if the client was relying on
6409    /// noticing child failure via failure of the parent `Node` retained by the
6410    /// client, the client may instead need to notice failure via other means.
6411    /// If other means aren't available/convenient, the client can instead
6412    /// retain the dispensable `Node` and create a child `Node` under that to
6413    /// send to the delegate participant, retaining this `Node` in order to
6414    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
6415    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
6416    /// (e.g. starting a new instance of the delegate participant and handing it
6417    /// a `BufferCollectionToken` created using
6418    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
6419    /// and clean up in a client-specific way).
6420    ///
6421    /// While it is possible (and potentially useful) to `SetDispensable` on a
6422    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
6423    /// to later replace a failed dispensable `Node` that was a direct child of
6424    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
6425    /// (since there's no `AttachToken` on a group). Instead, to enable
6426    /// `AttachToken` replacement in this case, create an additional
6427    /// non-dispensable token that's a direct child of the group and make the
6428    /// existing dispensable token a child of the additional token.  This way,
6429    /// the additional token that is a direct child of the group has
6430    /// `BufferCollection.AttachToken` which can be used to replace the failed
6431    /// dispensable token.
6432    ///
6433    /// `SetDispensable` on an already-dispensable token is idempotent.
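    ///
    /// ###### Example (illustrative sketch)
    ///
    /// A minimal sketch of the retained-dispensable pattern described above,
    /// assuming an async context, a connected `BufferCollectionTokenProxy`
    /// named `parent_token`, and the request-table field names documented on
    /// `Duplicate`; exact endpoint-constructor signatures vary slightly across
    /// fidl crate versions.
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2 as sysmem2;
    ///
    /// // Child token that will be marked dispensable and retained locally, so
    /// // failure of the delegate's subtree is still observable here.
    /// let (dispensable, dispensable_server) =
    ///     fidl::endpoints::create_proxy::<sysmem2::BufferCollectionTokenMarker>();
    /// parent_token.r#duplicate(sysmem2::BufferCollectionTokenDuplicateRequest {
    ///     rights_attenuation_mask: Some(fidl::Rights::SAME_RIGHTS),
    ///     token_request: Some(dispensable_server),
    ///     ..Default::default()
    /// })?;
    /// dispensable.r#set_dispensable()?;
    ///
    /// // Grandchild token that is actually handed to the delegate participant.
    /// let (delegate_client, delegate_server) =
    ///     fidl::endpoints::create_endpoints::<sysmem2::BufferCollectionTokenMarker>();
    /// dispensable.r#duplicate(sysmem2::BufferCollectionTokenDuplicateRequest {
    ///     rights_attenuation_mask: Some(fidl::Rights::SAME_RIGHTS),
    ///     token_request: Some(delegate_server),
    ///     ..Default::default()
    /// })?;
    /// dispensable.r#sync().await?;
    /// // `delegate_client` can now be sent to the delegate participant.
    /// ```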
6434    pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
6435        self.client.send::<fidl::encoding::EmptyPayload>(
6436            (),
6437            0x228acf979254df8b,
6438            fidl::encoding::DynamicFlags::FLEXIBLE,
6439        )
6440    }
6441
6442    /// Create a logical OR among a set of tokens, called a
6443    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
6444    ///
6445    /// Most sysmem clients and many participants don't need to care about this
6446    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
6447    /// a participant wants to attempt to include one set of delegate
6448    /// participants, but if constraints don't combine successfully that way,
6449    /// fall back to a different (possibly overlapping) set of delegate
6450    /// participants, and/or fall back to a less demanding strategy (in terms of
6451    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
6452    /// across all involved delegate participants). In such cases, a
6453    /// `BufferCollectionTokenGroup` is useful.
6454    ///
6455    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
6456    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s).  The child tokens
6457    /// which are not selected during aggregation will fail (close), which a
6458    /// potential participant should notice when their `BufferCollection`
6459    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
6460    /// clean up the speculative usage that didn't end up happening (this is
6461    /// similar to a normal `BufferCollection` server end closing on failure to
6462    /// allocate a logical buffer collection or later async failure of a buffer
6463    /// collection).
6464    ///
6465    /// See comments on protocol `BufferCollectionTokenGroup`.
6466    ///
6467    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
6468    /// applied to the whole group can be achieved with a
6469    /// `BufferCollectionToken` for this purpose as a direct parent of the
6470    /// `BufferCollectionTokenGroup`.
6471    ///
6472    /// All table fields are currently required.
6473    ///
6474    /// + request `group_request` The server end of a
6475    ///   `BufferCollectionTokenGroup` channel to be served by sysmem.
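    ///
    /// ###### Example (illustrative sketch)
    ///
    /// A minimal sketch of creating a group under this token, assuming a
    /// connected `BufferCollectionTokenProxy` named `token`; the
    /// `BufferCollectionTokenGroupMarker` name and the `group_request` field
    /// follow the usual fidlgen naming but are assumptions here, and
    /// endpoint-constructor signatures vary slightly across fidl crate
    /// versions.
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2 as sysmem2;
    ///
    /// let (group, group_server) =
    ///     fidl::endpoints::create_proxy::<sysmem2::BufferCollectionTokenGroupMarker>();
    /// token.r#create_buffer_collection_token_group(
    ///     sysmem2::BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
    ///         group_request: Some(group_server),
    ///         ..Default::default()
    ///     },
    /// )?;
    /// // `group` is now used to create the mutually exclusive child tokens,
    /// // followed by `AllChildrenPresent`.
    /// ```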
6476    pub fn r#create_buffer_collection_token_group(
6477        &self,
6478        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
6479    ) -> Result<(), fidl::Error> {
6480        self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
6481            &mut payload,
6482            0x30f8d48e77bd36f2,
6483            fidl::encoding::DynamicFlags::FLEXIBLE,
6484        )
6485    }
6486}
6487
6488#[derive(Debug, Clone)]
6489pub struct BufferCollectionTokenProxy {
6490    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
6491}
6492
6493impl fidl::endpoints::Proxy for BufferCollectionTokenProxy {
6494    type Protocol = BufferCollectionTokenMarker;
6495
6496    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
6497        Self::new(inner)
6498    }
6499
6500    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
6501        self.client.into_channel().map_err(|client| Self { client })
6502    }
6503
6504    fn as_channel(&self) -> &::fidl::AsyncChannel {
6505        self.client.as_channel()
6506    }
6507}
6508
6509impl BufferCollectionTokenProxy {
6510    /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionToken.
6511    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
6512        let protocol_name =
6513            <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
6514        Self { client: fidl::client::Client::new(channel, protocol_name) }
6515    }
6516
6517    /// Get a Stream of events from the remote end of the protocol.
6518    ///
6519    /// # Panics
6520    ///
6521    /// Panics if the event stream was already taken.
6522    pub fn take_event_stream(&self) -> BufferCollectionTokenEventStream {
6523        BufferCollectionTokenEventStream { event_receiver: self.client.take_event_receiver() }
6524    }
6525
6526    /// Ensure that previous messages have been received server side. This is
6527    /// particularly useful after previous messages that created new tokens,
6528    /// because a token must be known to the sysmem server before sending the
6529    /// token to another participant.
6530    ///
6531    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
6532    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
6533    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
6534    /// to mitigate the possibility of a hostile/fake
6535    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
6536    /// Another way is to pass the token to
6537    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
6538    /// the token as part of exchanging it for a
6539    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
6540    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
6541    /// of stalling.
6542    ///
6543    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
6544    /// and then starting and completing a `Sync`, it's then safe to send the
6545    /// `BufferCollectionToken` client ends to other participants knowing the
6546    /// server will recognize the tokens when they're sent by the other
6547    /// participants to sysmem in a
6548    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
6549    /// efficient way to create tokens while avoiding unnecessary round trips.
6550    ///
6551    /// Other options include waiting for each
6552    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
6553    /// individually (using a separate call to `Sync` after each), or calling
6554    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
6555    /// converted to a `BufferCollection` via
6556    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
6557    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
6558    /// the sync step and can create multiple tokens at once.
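    ///
    /// ###### Example (illustrative sketch)
    ///
    /// A minimal sketch of the duplicate-then-`Sync` pattern described above,
    /// assuming an async context, a connected `BufferCollectionTokenProxy`
    /// named `token`, and the request-table field names documented on
    /// `Duplicate`.
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2 as sysmem2;
    ///
    /// let (child_client, child_server) =
    ///     fidl::endpoints::create_endpoints::<sysmem2::BufferCollectionTokenMarker>();
    /// token.r#duplicate(sysmem2::BufferCollectionTokenDuplicateRequest {
    ///     rights_attenuation_mask: Some(fidl::Rights::SAME_RIGHTS),
    ///     token_request: Some(child_server),
    ///     ..Default::default()
    /// })?;
    /// // One round trip ensures sysmem knows about the new token ...
    /// token.r#sync().await?;
    /// // ... after which `child_client` can safely be sent to another participant.
    /// ```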
6559    pub fn r#sync(
6560        &self,
6561    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
6562        BufferCollectionTokenProxyInterface::r#sync(self)
6563    }
6564
6565    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
6566    ///
6567    /// Normally a participant will convert a `BufferCollectionToken` into a
6568    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
6569    /// `Release` via the token (and then close the channel immediately or
6570    /// shortly later in response to server closing the server end), which
6571    /// avoids causing buffer collection failure. Without a prior `Release`,
6572    /// closing the `BufferCollectionToken` client end will cause buffer
6573    /// collection failure.
6574    ///
6575    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
6576    ///
6577    /// By default the server handles unexpected closure of a
6578    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
6579    /// first) by failing the buffer collection. Partly this is to expedite
6580    /// closing VMO handles to reclaim memory when any participant fails. If a
6581    /// participant would like to cleanly close a `BufferCollection` without
6582    /// causing buffer collection failure, the participant can send `Release`
6583    /// before closing the `BufferCollection` client end. The `Release` can
6584    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
6585    /// buffer collection won't require constraints from this node in order to
6586    /// allocate. If after `SetConstraints`, the constraints are retained and
6587    /// aggregated, despite the lack of `BufferCollection` connection at the
6588    /// time of constraints aggregation.
6589    ///
6590    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
6591    ///
6592    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
6593    /// end (without `Release` first) will trigger failure of the buffer
6594    /// collection. To close a `BufferCollectionTokenGroup` channel without
6595    /// failing the buffer collection, ensure that AllChildrenPresent() has been
6596    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
6597    /// client end.
6598    ///
6599    /// If `Release` occurs before
6600    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
6601    /// buffer collection will fail (triggered by reception of `Release` without
6602    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
6603    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
6604    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
6605    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
6606    /// close requires `AllChildrenPresent` (if not already sent), then
6607    /// `Release`, then close client end.
6608    ///
6609    /// If `Release` occurs after `AllChildrenPresent`, the children and all
6610    /// their constraints remain intact (just as they would if the
6611    /// `BufferCollectionTokenGroup` channel had remained open), and the client
6612    /// end close doesn't trigger buffer collection failure.
6613    ///
6614    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
6615    ///
6616    /// For brevity, the per-channel-protocol paragraphs above ignore the
6617    /// separate failure domain created by
6618    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
6619    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
6620    /// unexpectedly closes (without `Release` first) and that client end is
6621    /// under a failure domain, instead of failing the whole buffer collection,
6622    /// the failure domain is failed, but the buffer collection itself is
6623    /// isolated from failure of the failure domain. Such failure domains can be
6624    /// nested, in which case only the inner-most failure domain in which the
6625    /// `Node` resides fails.
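    ///
    /// ###### Example (illustrative sketch)
    ///
    /// A minimal sketch of a clean close that does not fail the buffer
    /// collection, assuming `token` is a connected
    /// `BufferCollectionTokenProxy` that is no longer needed.
    ///
    /// ```ignore
    /// // Tell sysmem this Node is going away on purpose ...
    /// token.r#release()?;
    /// // ... then drop the client end; this close does not fail the collection.
    /// drop(token);
    /// ```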
6626    pub fn r#release(&self) -> Result<(), fidl::Error> {
6627        BufferCollectionTokenProxyInterface::r#release(self)
6628    }
6629
6630    /// Set a name for VMOs in this buffer collection.
6631    ///
6632    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the VMO itself
6633    /// will be truncated to fit. The name of the VMO will be suffixed with the
6634    /// buffer index within the collection (if the suffix fits within
6635    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
6636    /// listed in the inspect data.
6637    ///
6638    /// The name only affects VMOs allocated after the name is set; this call
6639    /// does not rename existing VMOs. If multiple clients set different names
6640    /// then the larger priority value will win. Setting a new name with the
6641    /// same priority as a prior name doesn't change the name.
6642    ///
6643    /// All table fields are currently required.
6644    ///
6645    /// + request `priority` The name is only set if this is the first `SetName`
6646    ///   or if `priority` is greater than any previous `priority` value in
6647    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
6648    /// + request `name` The name for VMOs created under this buffer collection.
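    ///
    /// ###### Example (illustrative sketch)
    ///
    /// A minimal sketch, assuming `token` is a connected
    /// `BufferCollectionTokenProxy` and that `NodeSetNameRequest` exposes the
    /// `priority` and `name` fields described above; the literal values are
    /// placeholders.
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2 as sysmem2;
    ///
    /// token.r#set_name(&sysmem2::NodeSetNameRequest {
    ///     priority: Some(100),
    ///     name: Some("my-component:camera-stream".to_string()),
    ///     ..Default::default()
    /// })?;
    /// ```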
6649    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
6650        BufferCollectionTokenProxyInterface::r#set_name(self, payload)
6651    }
6652
6653    /// Set information about the current client that can be used by sysmem to
6654    /// help diagnose leaking memory and allocation stalls waiting for a
6655    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
6656    ///
6657    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
6658    /// `Node`(s) derived from this `Node`, unless overridden by
6659    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
6660    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
6661    ///
6662    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
6663    /// `Allocator` is the most efficient way to ensure that all
6664    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
6665    /// set, and is also more efficient than separately sending the same debug
6666    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
6667    /// created [`fuchsia.sysmem2/Node`].
6668    ///
6669    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
6670    /// indicate which client is closing their channel first, leading to subtree
6671    /// failure (which can be normal if the purpose of the subtree is over, but
6672    /// if happening earlier than expected, the client-channel-specific name can
6673    /// help diagnose where the failure is first coming from, from sysmem's
6674    /// point of view).
6675    ///
6676    /// All table fields are currently required.
6677    ///
6678    /// + request `name` This can be an arbitrary string, but the current
6679    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
6680    /// + request `id` This can be an arbitrary id, but the current process ID
6681    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
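    ///
    /// ###### Example (illustrative sketch)
    ///
    /// A minimal sketch, assuming `token` is a connected
    /// `BufferCollectionTokenProxy` and that `NodeSetDebugClientInfoRequest`
    /// exposes the `name` and `id` fields described above; `process_koid` is
    /// a placeholder for however the caller obtains its process koid.
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2 as sysmem2;
    ///
    /// token.r#set_debug_client_info(&sysmem2::NodeSetDebugClientInfoRequest {
    ///     name: Some("my_component.cm".to_string()),
    ///     id: Some(process_koid),
    ///     ..Default::default()
    /// })?;
    /// ```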
6682    pub fn r#set_debug_client_info(
6683        &self,
6684        mut payload: &NodeSetDebugClientInfoRequest,
6685    ) -> Result<(), fidl::Error> {
6686        BufferCollectionTokenProxyInterface::r#set_debug_client_info(self, payload)
6687    }
6688
6689    /// Sysmem logs a warning if sysmem hasn't seen
6690    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
6691    /// within 5 seconds after creation of a new collection.
6692    ///
6693    /// Clients can call this method to change when the log is printed. If
6694    /// multiple clients set the deadline, it's unspecified which deadline will
6695    /// take effect.
6696    ///
6697    /// In most cases the default works well.
6698    ///
6699    /// All table fields are currently required.
6700    ///
6701    /// + request `deadline` The time at which sysmem will start trying to log
6702    ///   the warning, unless all constraints are with sysmem by then.
6703    pub fn r#set_debug_timeout_log_deadline(
6704        &self,
6705        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
6706    ) -> Result<(), fidl::Error> {
6707        BufferCollectionTokenProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
6708    }
6709
6710    /// This enables verbose logging for the buffer collection.
6711    ///
6712    /// Verbose logging includes constraints set via
6713    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
6714    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
6715    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
6716    /// the tree of `Node`(s).
6717    ///
6718    /// Normally sysmem prints only a single line complaint when aggregation
6719    /// fails, with just the specific detailed reason that aggregation failed,
6720    /// with little surrounding context.  While this is often enough to diagnose
6721    /// a problem if only a small change was made and everything was working
6722    /// before the small change, it's often not particularly helpful for getting
6723    /// a new buffer collection to work for the first time.  Especially with
6724    /// more complex trees of nodes, involving things like
6725    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
6726    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
6727    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
6728    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
6729    /// looks like and why it's failing a logical allocation, or why a tree or
6730    /// subtree is failing sooner than expected.
6731    ///
6732    /// The intent of the extra logging is to be acceptable from a performance
6733    /// point of view, under the assumption that verbose logging is only enabled
6734    /// on a low number of buffer collections. If we're not tracking down a bug,
6735    /// we shouldn't send this message.
6736    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
6737        BufferCollectionTokenProxyInterface::r#set_verbose_logging(self)
6738    }
6739
6740    /// This gets a handle that can be used as a parameter to
6741    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
6742    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
6743    /// client obtained this handle from this `Node`.
6744    ///
6745    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
6746    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
6747    /// despite the two calls typically being on different channels.
6748    ///
6749    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
6750    ///
6751    /// All table fields are currently required.
6752    ///
6753    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
6754    ///   different `Node` channel, to prove that the client obtained the handle
6755    ///   from this `Node`.
6756    pub fn r#get_node_ref(
6757        &self,
6758    ) -> fidl::client::QueryResponseFut<
6759        NodeGetNodeRefResponse,
6760        fidl::encoding::DefaultFuchsiaResourceDialect,
6761    > {
6762        BufferCollectionTokenProxyInterface::r#get_node_ref(self)
6763    }
6764
6765    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
6766    /// rooted at a different child token of a common parent
6767    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
6768    /// passed-in `node_ref`.
6769    ///
6770    /// This call is for assisting with admission control de-duplication, and
6771    /// with debugging.
6772    ///
6773    /// The `node_ref` must be obtained using
6774    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
6775    ///
6776    /// The `node_ref` can be a duplicated handle; it's not necessary to call
6777    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
6778    ///
6779    /// If a calling token may not actually be a valid token at all due to a
6780    /// potentially hostile/untrusted provider of the token, call
6781    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6782    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
6783    /// never responds due to a calling token not being a real token (not really
6784    /// talking to sysmem).  Another option is to call
6785    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
6786    /// which also validates the token along with converting it to a
6787    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
6788    ///
6789    /// All table fields are currently required.
6790    ///
6791    /// - response `is_alternate`
6792    ///   - true: The first parent node in common between the calling node and
6793    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
6794    ///     that the calling `Node` and the `node_ref` `Node` will not have both
6795    ///     their constraints apply - rather sysmem will choose one or the other
6796    ///     of the constraints - never both.  This is because only one child of
6797    ///     a `BufferCollectionTokenGroup` is selected during logical
6798    ///     allocation, with only that one child's subtree contributing to
6799    ///     constraints aggregation.
6800    ///   - false: The first parent node in common between the calling `Node`
6801    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
6802    ///     Currently, this means the first parent node in common is a
6803    ///     `BufferCollectionToken` or `BufferCollection` (whether or not
6804    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
6805    ///     `Node` may have both their constraints apply during constraints
6806    ///     aggregation of the logical allocation, if both `Node`(s) are
6807    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
6808    ///     this case, there is no `BufferCollectionTokenGroup` that will
6809    ///     directly prevent the two `Node`(s) from both being selected and
6810    ///     their constraints both aggregated, but even when false, one or both
6811    ///     `Node`(s) may still be eliminated from consideration if one or both
6812    ///     `Node`(s) has a direct or indirect parent
6813    ///     `BufferCollectionTokenGroup` which selects a child subtree other
6814    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
6815    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
6816    ///   associated with the same buffer collection as the calling `Node`.
6817    ///   Another reason for this error is if the `node_ref` is an
6818    ///   [`zx.Handle:EVENT`] handle with sufficient rights, but isn't actually
6819    ///   a real `node_ref` obtained from `GetNodeRef`.
6820    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
6821    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
6822    ///   the needed rights expected on a real `node_ref`.
6823    /// * No other failing status codes are returned by this call.  However,
6824    ///   sysmem may add additional codes in future, so the client should have
6825    ///   sensible default handling for any failing status code.
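    ///
    /// ###### Example (illustrative sketch)
    ///
    /// A minimal sketch of the `GetNodeRef` / `IsAlternateFor` pairing,
    /// assuming an async context, two connected token proxies `token_a` and
    /// `token_b` referring to the same buffer collection, and the
    /// request/response field names documented above.
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2 as sysmem2;
    ///
    /// // Obtain a node_ref from one Node ...
    /// let node_ref = token_a.r#get_node_ref().await?.node_ref;
    /// // ... and ask another Node whether the two are alternates, i.e. siblings
    /// // under a common BufferCollectionTokenGroup.
    /// match token_b
    ///     .r#is_alternate_for(sysmem2::NodeIsAlternateForRequest {
    ///         node_ref,
    ///         ..Default::default()
    ///     })
    ///     .await?
    /// {
    ///     Ok(response) => {
    ///         let is_alternate = response.is_alternate.unwrap_or(false);
    ///         // De-duplicate admission control decisions based on `is_alternate`.
    ///     }
    ///     Err(_error) => {
    ///         // sysmem2 Error, e.g. NOT_FOUND or PROTOCOL_DEVIATION.
    ///     }
    /// }
    /// ```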
6826    pub fn r#is_alternate_for(
6827        &self,
6828        mut payload: NodeIsAlternateForRequest,
6829    ) -> fidl::client::QueryResponseFut<
6830        NodeIsAlternateForResult,
6831        fidl::encoding::DefaultFuchsiaResourceDialect,
6832    > {
6833        BufferCollectionTokenProxyInterface::r#is_alternate_for(self, payload)
6834    }
6835
6836    /// Get the buffer collection ID. This ID is also available from
6837    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
6838    /// within the collection).
6839    ///
6840    /// This call is mainly useful in situations where we can't convey a
6841    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
6842    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
6843    /// handle, which can be joined back up with a `BufferCollection` client end
6844    /// that was created via a different path. Prefer to convey a
6845    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
6846    ///
6847    /// Trusting a `buffer_collection_id` value from a source other than sysmem
6848    /// is analogous to trusting a koid value from a source other than zircon.
6849    /// Both should be avoided unless really necessary, and both require
6850    /// caution. In some situations it may be reasonable to refer to a
6851    /// pre-established `BufferCollection` by `buffer_collection_id` via a
6852    /// protocol for efficiency reasons, but an incoming value purporting to be
6853    /// a `buffer_collection_id` is not sufficient alone to justify granting the
6854    /// sender of the `buffer_collection_id` any capability. The sender must
6855    /// first prove to a receiver that the sender has/had a VMO or has/had a
6856    /// `BufferCollectionToken` to the same collection by sending a handle that
6857    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
6858    /// `buffer_collection_id` value. The receiver should take care to avoid
6859    /// assuming that a sender had a `BufferCollectionToken` in cases where the
6860    /// sender has only proven that the sender had a VMO.
6861    ///
6862    /// - response `buffer_collection_id` This ID is unique per buffer
6863    ///   collection per boot. Each buffer is uniquely identified by the
6864    ///   `buffer_collection_id` and `buffer_index` together.
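    ///
    /// ###### Example (illustrative sketch)
    ///
    /// A minimal sketch, assuming an async context and a connected
    /// `BufferCollectionTokenProxy` named `token`; the response field name
    /// follows the docs above.
    ///
    /// ```ignore
    /// let id = token.r#get_buffer_collection_id().await?.buffer_collection_id;
    /// // `id` is an Option<u64> that is unique per buffer collection per boot,
    /// // and can be compared against the id reported by `Allocator.GetVmoInfo`.
    /// ```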
6865    pub fn r#get_buffer_collection_id(
6866        &self,
6867    ) -> fidl::client::QueryResponseFut<
6868        NodeGetBufferCollectionIdResponse,
6869        fidl::encoding::DefaultFuchsiaResourceDialect,
6870    > {
6871        BufferCollectionTokenProxyInterface::r#get_buffer_collection_id(self)
6872    }
6873
6874    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
6875    /// created after this message to weak, which means that a client's `Node`
6876    /// client end (or a child created after this message) is not alone
6877    /// sufficient to keep allocated VMOs alive.
6878    ///
6879    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
6880    /// `close_weak_asap`.
6881    ///
6882    /// This message is only permitted before the `Node` becomes ready for
6883    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
6884    ///   * `BufferCollectionToken`: any time
6885    ///   * `BufferCollection`: before `SetConstraints`
6886    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
6887    ///
6888    /// Currently, no conversion from strong `Node` to weak `Node` after ready
6889    /// for allocation is provided, but a client can simulate that by creating
6890    /// an additional `Node` before allocation and setting that additional
6891    /// `Node` to weak, and then potentially at some point later sending
6892    /// `Release` and closing the client end of the client's strong `Node`, but
6893    /// keeping the client's weak `Node`.
6894    ///
6895    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
6896    /// collection failure (all `Node` client end(s) will see
6897    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
6898    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
6899    /// this situation until all `Node`(s) are ready for allocation. For initial
6900    /// allocation to succeed, at least one strong `Node` is required to exist
6901    /// at allocation time, but after that client receives VMO handles, that
6902    /// client can `BufferCollection.Release` and close the client end without
6903    /// causing this type of failure.
6904    ///
6905    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
6906    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
6907    /// separately as appropriate.
6908    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
6909        BufferCollectionTokenProxyInterface::r#set_weak(self)
6910    }
6911
6912    /// This indicates to sysmem that the client is prepared to pay attention to
6913    /// `close_weak_asap`.
6914    ///
6915    /// If sent, this message must be before
6916    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
6917    ///
6918    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
6919    /// send this message before `WaitForAllBuffersAllocated`, or a parent
6920    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
6921    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
6922    /// trigger buffer collection failure.
6923    ///
6924    /// This message is necessary because weak sysmem VMOs have not always been
6925    /// a thing, so older clients are not aware of the need to pay attention to
6926    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
6927    /// sysmem weak VMO handles asap. By having this message and requiring
6928    /// participants to indicate their acceptance of this aspect of the overall
6929    /// protocol, we avoid situations where an older client is delivered a weak
6930    /// VMO without any way for sysmem to get that VMO to close quickly later
6931    /// (and on a per-buffer basis).
6932    ///
6933    /// A participant that doesn't handle `close_weak_asap` and also doesn't
6934    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
6935    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
6936    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
6937    /// same participant has a child/delegate which does retrieve VMOs, that
6938    /// child/delegate will need to send `SetWeakOk` before
6939    /// `WaitForAllBuffersAllocated`.
6940    ///
6941    /// + request `for_child_nodes_also` If present and true, this means direct
6942    ///   child nodes of this node created after this message plus all
6943    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
6944    ///   those nodes. Any child node of this node that was created before this
6945    ///   message is not included. This setting is "sticky" in the sense that a
6946    ///   subsequent `SetWeakOk` without this bool set to true does not reset
6947    ///   the server-side bool. If this creates a problem for a participant, a
6948    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
6949    ///   tokens instead, as appropriate. A participant should only set
6950    ///   `for_child_nodes_also` true if the participant can really promise to
6951    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
6952    ///   weak VMO handles held by participants holding the corresponding child
6953    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
6954    ///   which are using sysmem(1) can be weak, despite the clients of those
6955    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
6956    ///   direct way to find out about `close_weak_asap`. This only applies to
6957    ///   descendants of this `Node` which are using sysmem(1), not to this
6958    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
6959    ///   token, which will fail allocation unless an ancestor of this `Node`
6960    ///   specified `for_child_nodes_also` true.
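    ///
    /// ###### Example (illustrative sketch)
    ///
    /// A minimal sketch, assuming `token` is a connected
    /// `BufferCollectionTokenProxy` and that `NodeSetWeakOkRequest` exposes
    /// the `for_child_nodes_also` field described above.
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2 as sysmem2;
    ///
    /// // Opt this Node, plus child Nodes created after this message, in to weak
    /// // VMO semantics; the holder promises to honor close_weak_asap.
    /// token.r#set_weak_ok(sysmem2::NodeSetWeakOkRequest {
    ///     for_child_nodes_also: Some(true),
    ///     ..Default::default()
    /// })?;
    /// ```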
6961    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
6962        BufferCollectionTokenProxyInterface::r#set_weak_ok(self, payload)
6963    }
6964
6965    /// The server_end will be closed after this `Node` and any child nodes
6966    /// have released their buffer counts, making those counts available for
6967    /// reservation by a different `Node` via
6968    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
6969    ///
6970    /// The `Node` buffer counts may not be released until the entire tree of
6971    /// `Node`(s) is closed or failed, because
6972    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
6973    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
6974    /// `Node` buffer counts remain reserved until the orphaned node is later
6975    /// cleaned up.
6976    ///
6977    /// If the `Node` exceeds a fairly large number of attached eventpair server
6978    /// ends, a log message will indicate this and the `Node` (and the
6979    /// appropriate sub-tree) will fail.
6980    ///
6981    /// The `server_end` will remain open when
6982    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
6983    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
6984    /// [`fuchsia.sysmem2/BufferCollection`].
6985    ///
6986    /// This message can also be used with a
6987    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
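    ///
    /// ###### Example (illustrative sketch)
    ///
    /// A minimal sketch, assuming `token` is a connected
    /// `BufferCollectionTokenProxy` and that `NodeAttachNodeTrackingRequest`
    /// exposes a `server_end` eventpair field as described above; eventpair
    /// constructor signatures vary slightly across zx crate versions.
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2 as sysmem2;
    ///
    /// let (tracking_client, tracking_server) = zx::EventPair::create();
    /// token.r#attach_node_tracking(sysmem2::NodeAttachNodeTrackingRequest {
    ///     server_end: Some(tracking_server),
    ///     ..Default::default()
    /// })?;
    /// // Keep `tracking_client`; ZX_EVENTPAIR_PEER_CLOSED on it indicates this
    /// // Node's buffer counts have been released and can be re-reserved via
    /// // BufferCollection.AttachToken.
    /// ```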
6988    pub fn r#attach_node_tracking(
6989        &self,
6990        mut payload: NodeAttachNodeTrackingRequest,
6991    ) -> Result<(), fidl::Error> {
6992        BufferCollectionTokenProxyInterface::r#attach_node_tracking(self, payload)
6993    }
6994
6995    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
6996    /// one, referring to the same buffer collection.
6997    ///
6998    /// The created tokens are children of this token in the
6999    /// [`fuchsia.sysmem2/Node`] hierarchy.
7000    ///
7001    /// This method can be used to add more participants, by transferring the
7002    /// newly created tokens to additional participants.
7003    ///
7004    /// A new token will be returned for each entry in the
7005    /// `rights_attenuation_masks` array.
7006    ///
7007    /// If the called token may not actually be a valid token due to a
7008    /// potentially hostile/untrusted provider of the token, consider using
7009    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
7010    /// instead of potentially getting stuck indefinitely if
7011    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
7012    /// due to the calling token not being a real token.
7013    ///
7014    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
7015    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
7016    /// method, because the sync step is included in this call, at the cost of a
7017    /// round trip during this call.
7018    ///
7019    /// All tokens must be turned in to sysmem via
7020    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
7021    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
7022    /// successfully allocate buffers (or to logically allocate buffers in the
7023    /// case of subtrees involving
7024    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]).
7025    ///
7026    /// All table fields are currently required.
7027    ///
7028    /// + request `rights_attenuation_masks` In each entry of
7029    ///   `rights_attenuation_masks`, rights bits that are zero will be absent
7030    ///   in the buffer VMO rights obtainable via the corresponding returned
7031    ///   token. This allows an initiator or intermediary participant to
7032    ///   attenuate the rights available to a participant. This does not allow a
7033    ///   participant to gain rights that the participant doesn't already have.
7034    ///   The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
7035    ///   attenuation should be applied.
7036    /// - response `tokens` The client ends of each newly created token.
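    ///
    /// ###### Example (illustrative sketch)
    ///
    /// A minimal sketch, assuming an async context, a connected
    /// `BufferCollectionTokenProxy` named `token`, and the request/response
    /// field names documented above.
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2 as sysmem2;
    ///
    /// // One attenuation mask per requested child token; no attenuation here.
    /// let response = token
    ///     .r#duplicate_sync(&sysmem2::BufferCollectionTokenDuplicateSyncRequest {
    ///         rights_attenuation_masks: Some(vec![
    ///             fidl::Rights::SAME_RIGHTS,
    ///             fidl::Rights::SAME_RIGHTS,
    ///         ]),
    ///         ..Default::default()
    ///     })
    ///     .await?;
    /// // The sync step is built in, so these client ends can be sent to other
    /// // participants immediately.
    /// let tokens = response.tokens.unwrap_or_default();
    /// ```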
7037    pub fn r#duplicate_sync(
7038        &self,
7039        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
7040    ) -> fidl::client::QueryResponseFut<
7041        BufferCollectionTokenDuplicateSyncResponse,
7042        fidl::encoding::DefaultFuchsiaResourceDialect,
7043    > {
7044        BufferCollectionTokenProxyInterface::r#duplicate_sync(self, payload)
7045    }
7046
7047    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
7048    /// one, referring to the same buffer collection.
7049    ///
7050    /// The created token is a child of this token in the
7051    /// [`fuchsia.sysmem2/Node`] hierarchy.
7052    ///
7053    /// This method can be used to add a participant, by transferring the newly
7054    /// created token to another participant.
7055    ///
7056    /// This one-way message can be used instead of the two-way
7057    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
7058    /// performance-sensitive cases where it would be undesirable to wait for
7059    /// sysmem to respond to
7060    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
7061    /// client code isn't structured to make it easy to duplicate all the needed
7062    /// tokens at once.
7063    ///
7064    /// After sending one or more `Duplicate` messages, and before sending the
7065    /// newly created child tokens to other participants (or to other
7066    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
7067    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
7068    /// `Sync` call can be made on the token, or on the `BufferCollection`
7069    /// obtained by passing this token to `BindSharedCollection`.  Either will
7070    /// ensure that the server knows about the tokens created via `Duplicate`
7071    /// before the other participant sends the token to the server via a separate
7072    /// `Allocator` channel.
7073    ///
7074    /// All tokens must be turned in via
7075    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
7076    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
7077    /// successfully allocate buffers.
7078    ///
7079    /// All table fields are currently required.
7080    ///
7081    /// + request `rights_attenuation_mask` The rights bits that are zero in
7082    ///   this mask will be absent in the buffer VMO rights obtainable via the
7083    ///   client end of `token_request`. This allows an initiator or
7084    ///   intermediary participant to attenuate the rights available to a
7085    ///   delegate participant. This does not allow a participant to gain rights
7086    ///   that the participant doesn't already have. The value
7087    ///   `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
7088    ///   should be applied.
7089    ///   + These values for rights_attenuation_mask result in no attenuation:
7090    ///     + `ZX_RIGHT_SAME_RIGHTS` (preferred)
7091    ///     + 0xFFFFFFFF (this is reasonable when an attenuation mask is
7092    ///       computed)
7093    ///     + 0 (deprecated - do not use 0 - an ERROR will go to the log)
7094    /// + request `token_request` is the server end of a `BufferCollectionToken`
7095    ///   channel. The client end of this channel acts as another participant in
7096    ///   the shared buffer collection.
7097    pub fn r#duplicate(
7098        &self,
7099        mut payload: BufferCollectionTokenDuplicateRequest,
7100    ) -> Result<(), fidl::Error> {
7101        BufferCollectionTokenProxyInterface::r#duplicate(self, payload)
7102    }
7103
7104    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
7105    ///
7106    /// When the `BufferCollectionToken` is converted to a
7107    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
7108    /// the `BufferCollection` also.
7109    ///
7110    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
7111    /// client end without having sent
7112    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
7113    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
7114    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
7115    /// to the root `Node`, which fails the whole buffer collection. In
7116    /// contrast, a dispensable `Node` can fail after buffers are allocated
7117    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
7118    /// hierarchy.
7119    ///
7120    /// The dispensable `Node` participates in constraints aggregation along
7121    /// with its parent before buffer allocation. If the dispensable `Node`
7122    /// fails before buffers are allocated, the failure propagates to the
7123    /// dispensable `Node`'s parent.
7124    ///
7125    /// After buffers are allocated, failure of the dispensable `Node` (or any
7126    /// child of the dispensable `Node`) does not propagate to the dispensable
7127    /// `Node`'s parent. Failure does propagate from a normal child of a
7128    /// dispensable `Node` to the dispensable `Node`.  Failure of a child is
7129    /// blocked from reaching its parent if the child is attached using
7130    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
7131    /// dispensable and the failure occurred after allocation.
7132    ///
7133    /// A dispensable `Node` can be used in cases where a participant needs to
7134    /// provide constraints, but after buffers are allocated, the participant
7135    /// can fail without causing buffer collection failure from the parent
7136    /// `Node`'s point of view.
7137    ///
7138    /// In contrast, `BufferCollection.AttachToken` can be used to create a
7139    /// `BufferCollectionToken` which does not participate in constraints
7140    /// aggregation with its parent `Node`, and whose failure at any time does
7141    /// not propagate to its parent `Node`, and whose potential delay providing
7142    /// constraints does not prevent the parent `Node` from completing its
7143    /// buffer allocation.
7144    ///
7145    /// An initiator (creator of the root `Node` using
7146    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
7147    /// scenarios choose to initially use a dispensable `Node` for a first
7148    /// instance of a participant, and then later if the first instance of that
7149    /// participant fails, a new second instance of that participant may be given
7150    /// a `BufferCollectionToken` created with `AttachToken`.
7151    ///
7152    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
7153    /// shortly before sending the dispensable `BufferCollectionToken` to a
7154    /// delegate participant. Because `SetDispensable` prevents propagation of
7155    /// child `Node` failure to parent `Node`(s), if the client was relying on
7156    /// noticing child failure via failure of the parent `Node` retained by the
7157    /// client, the client may instead need to notice failure via other means.
7158    /// If other means aren't available/convenient, the client can instead
7159    /// retain the dispensable `Node` and create a child `Node` under that to
7160    /// send to the delegate participant, retaining this `Node` in order to
7161    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
7162    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
7163    /// (e.g. starting a new instance of the delegate participant and handing it
7164    /// a `BufferCollectionToken` created using
7165    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
7166    /// and clean up in a client-specific way).
7167    ///
7168    /// While it is possible (and potentially useful) to `SetDispensable` on a
7169    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
7170    /// to later replace a failed dispensable `Node` that was a direct child of
7171    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
7172    /// (since there's no `AttachToken` on a group). Instead, to enable
7173    /// `AttachToken` replacement in this case, create an additional
7174    /// non-dispensable token that's a direct child of the group and make the
7175    /// existing dispensable token a child of the additional token.  This way,
7176    /// the additional token that is a direct child of the group has
7177    /// `BufferCollection.AttachToken` which can be used to replace the failed
7178    /// dispensable token.
7179    ///
7180    /// `SetDispensable` on an already-dispensable token is idempotent.
7181    pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
7182        BufferCollectionTokenProxyInterface::r#set_dispensable(self)
7183    }
7184
7185    /// Create a logical OR among a set of tokens, called a
7186    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
7187    ///
7188    /// Most sysmem clients and many participants don't need to care about this
7189    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
7190    /// a participant wants to attempt to include one set of delegate
7191    /// participants, but if constraints don't combine successfully that way,
7192    /// fall back to a different (possibly overlapping) set of delegate
7193    /// participants, and/or fall back to a less demanding strategy (in terms of
7194    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
7195    /// across all involved delegate participants). In such cases, a
7196    /// `BufferCollectionTokenGroup` is useful.
7197    ///
7198    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
7199    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s).  The child tokens
7200    /// which are not selected during aggregation will fail (close), which a
7201    /// potential participant should notice when their `BufferCollection`
7202    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
7203    /// clean up the speculative usage that didn't end up happening (this is
7204    /// similar to a normal `BufferCollection` server end closing on failure to
7205    /// allocate a logical buffer collection or later async failure of a buffer
7206    /// collection).
7207    ///
7208    /// See comments on protocol `BufferCollectionTokenGroup`.
7209    ///
7210    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
7211    /// applied to the whole group can be achieved with a
7212    /// `BufferCollectionToken` for this purpose as a direct parent of the
7213    /// `BufferCollectionTokenGroup`.
7214    ///
7215    /// All table fields are currently required.
7216    ///
7217    /// + request `group_request` The server end of a
7218    ///   `BufferCollectionTokenGroup` channel to be served by sysmem.
7219    pub fn r#create_buffer_collection_token_group(
7220        &self,
7221        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
7222    ) -> Result<(), fidl::Error> {
7223        BufferCollectionTokenProxyInterface::r#create_buffer_collection_token_group(self, payload)
7224    }
7225}
7226
7227impl BufferCollectionTokenProxyInterface for BufferCollectionTokenProxy {
7228    type SyncResponseFut =
7229        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
7230    fn r#sync(&self) -> Self::SyncResponseFut {
7231        fn _decode(
7232            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
7233        ) -> Result<(), fidl::Error> {
7234            let _response = fidl::client::decode_transaction_body::<
7235                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
7236                fidl::encoding::DefaultFuchsiaResourceDialect,
7237                0x11ac2555cf575b54,
7238            >(_buf?)?
7239            .into_result::<BufferCollectionTokenMarker>("sync")?;
7240            Ok(_response)
7241        }
7242        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
7243            (),
7244            0x11ac2555cf575b54,
7245            fidl::encoding::DynamicFlags::FLEXIBLE,
7246            _decode,
7247        )
7248    }
7249
7250    fn r#release(&self) -> Result<(), fidl::Error> {
7251        self.client.send::<fidl::encoding::EmptyPayload>(
7252            (),
7253            0x6a5cae7d6d6e04c6,
7254            fidl::encoding::DynamicFlags::FLEXIBLE,
7255        )
7256    }
7257
7258    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
7259        self.client.send::<NodeSetNameRequest>(
7260            payload,
7261            0xb41f1624f48c1e9,
7262            fidl::encoding::DynamicFlags::FLEXIBLE,
7263        )
7264    }
7265
7266    fn r#set_debug_client_info(
7267        &self,
7268        mut payload: &NodeSetDebugClientInfoRequest,
7269    ) -> Result<(), fidl::Error> {
7270        self.client.send::<NodeSetDebugClientInfoRequest>(
7271            payload,
7272            0x5cde8914608d99b1,
7273            fidl::encoding::DynamicFlags::FLEXIBLE,
7274        )
7275    }
7276
7277    fn r#set_debug_timeout_log_deadline(
7278        &self,
7279        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
7280    ) -> Result<(), fidl::Error> {
7281        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
7282            payload,
7283            0x716b0af13d5c0806,
7284            fidl::encoding::DynamicFlags::FLEXIBLE,
7285        )
7286    }
7287
7288    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
7289        self.client.send::<fidl::encoding::EmptyPayload>(
7290            (),
7291            0x5209c77415b4dfad,
7292            fidl::encoding::DynamicFlags::FLEXIBLE,
7293        )
7294    }
7295
7296    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
7297        NodeGetNodeRefResponse,
7298        fidl::encoding::DefaultFuchsiaResourceDialect,
7299    >;
7300    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
7301        fn _decode(
7302            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
7303        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
7304            let _response = fidl::client::decode_transaction_body::<
7305                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
7306                fidl::encoding::DefaultFuchsiaResourceDialect,
7307                0x5b3d0e51614df053,
7308            >(_buf?)?
7309            .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
7310            Ok(_response)
7311        }
7312        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
7313            (),
7314            0x5b3d0e51614df053,
7315            fidl::encoding::DynamicFlags::FLEXIBLE,
7316            _decode,
7317        )
7318    }
7319
7320    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
7321        NodeIsAlternateForResult,
7322        fidl::encoding::DefaultFuchsiaResourceDialect,
7323    >;
7324    fn r#is_alternate_for(
7325        &self,
7326        mut payload: NodeIsAlternateForRequest,
7327    ) -> Self::IsAlternateForResponseFut {
7328        fn _decode(
7329            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
7330        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
7331            let _response = fidl::client::decode_transaction_body::<
7332                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
7333                fidl::encoding::DefaultFuchsiaResourceDialect,
7334                0x3a58e00157e0825,
7335            >(_buf?)?
7336            .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
7337            Ok(_response.map(|x| x))
7338        }
7339        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
7340            &mut payload,
7341            0x3a58e00157e0825,
7342            fidl::encoding::DynamicFlags::FLEXIBLE,
7343            _decode,
7344        )
7345    }
7346
7347    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
7348        NodeGetBufferCollectionIdResponse,
7349        fidl::encoding::DefaultFuchsiaResourceDialect,
7350    >;
7351    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
7352        fn _decode(
7353            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
7354        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
7355            let _response = fidl::client::decode_transaction_body::<
7356                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
7357                fidl::encoding::DefaultFuchsiaResourceDialect,
7358                0x77d19a494b78ba8c,
7359            >(_buf?)?
7360            .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
7361            Ok(_response)
7362        }
7363        self.client.send_query_and_decode::<
7364            fidl::encoding::EmptyPayload,
7365            NodeGetBufferCollectionIdResponse,
7366        >(
7367            (),
7368            0x77d19a494b78ba8c,
7369            fidl::encoding::DynamicFlags::FLEXIBLE,
7370            _decode,
7371        )
7372    }
7373
7374    fn r#set_weak(&self) -> Result<(), fidl::Error> {
7375        self.client.send::<fidl::encoding::EmptyPayload>(
7376            (),
7377            0x22dd3ea514eeffe1,
7378            fidl::encoding::DynamicFlags::FLEXIBLE,
7379        )
7380    }
7381
7382    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
7383        self.client.send::<NodeSetWeakOkRequest>(
7384            &mut payload,
7385            0x38a44fc4d7724be9,
7386            fidl::encoding::DynamicFlags::FLEXIBLE,
7387        )
7388    }
7389
7390    fn r#attach_node_tracking(
7391        &self,
7392        mut payload: NodeAttachNodeTrackingRequest,
7393    ) -> Result<(), fidl::Error> {
7394        self.client.send::<NodeAttachNodeTrackingRequest>(
7395            &mut payload,
7396            0x3f22f2a293d3cdac,
7397            fidl::encoding::DynamicFlags::FLEXIBLE,
7398        )
7399    }
7400
7401    type DuplicateSyncResponseFut = fidl::client::QueryResponseFut<
7402        BufferCollectionTokenDuplicateSyncResponse,
7403        fidl::encoding::DefaultFuchsiaResourceDialect,
7404    >;
7405    fn r#duplicate_sync(
7406        &self,
7407        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
7408    ) -> Self::DuplicateSyncResponseFut {
7409        fn _decode(
7410            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
7411        ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
7412            let _response = fidl::client::decode_transaction_body::<
7413                fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
7414                fidl::encoding::DefaultFuchsiaResourceDialect,
7415                0x1c1af9919d1ca45c,
7416            >(_buf?)?
7417            .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
7418            Ok(_response)
7419        }
7420        self.client.send_query_and_decode::<
7421            BufferCollectionTokenDuplicateSyncRequest,
7422            BufferCollectionTokenDuplicateSyncResponse,
7423        >(
7424            payload,
7425            0x1c1af9919d1ca45c,
7426            fidl::encoding::DynamicFlags::FLEXIBLE,
7427            _decode,
7428        )
7429    }
7430
7431    fn r#duplicate(
7432        &self,
7433        mut payload: BufferCollectionTokenDuplicateRequest,
7434    ) -> Result<(), fidl::Error> {
7435        self.client.send::<BufferCollectionTokenDuplicateRequest>(
7436            &mut payload,
7437            0x73e78f92ee7fb887,
7438            fidl::encoding::DynamicFlags::FLEXIBLE,
7439        )
7440    }
7441
7442    fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
7443        self.client.send::<fidl::encoding::EmptyPayload>(
7444            (),
7445            0x228acf979254df8b,
7446            fidl::encoding::DynamicFlags::FLEXIBLE,
7447        )
7448    }
7449
7450    fn r#create_buffer_collection_token_group(
7451        &self,
7452        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
7453    ) -> Result<(), fidl::Error> {
7454        self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
7455            &mut payload,
7456            0x30f8d48e77bd36f2,
7457            fidl::encoding::DynamicFlags::FLEXIBLE,
7458        )
7459    }
7460}
7461
7462pub struct BufferCollectionTokenEventStream {
7463    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
7464}
7465
7466impl std::marker::Unpin for BufferCollectionTokenEventStream {}
7467
7468impl futures::stream::FusedStream for BufferCollectionTokenEventStream {
7469    fn is_terminated(&self) -> bool {
7470        self.event_receiver.is_terminated()
7471    }
7472}
7473
7474impl futures::Stream for BufferCollectionTokenEventStream {
7475    type Item = Result<BufferCollectionTokenEvent, fidl::Error>;
7476
7477    fn poll_next(
7478        mut self: std::pin::Pin<&mut Self>,
7479        cx: &mut std::task::Context<'_>,
7480    ) -> std::task::Poll<Option<Self::Item>> {
7481        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
7482            &mut self.event_receiver,
7483            cx
7484        )?) {
7485            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenEvent::decode(buf))),
7486            None => std::task::Poll::Ready(None),
7487        }
7488    }
7489}
7490
7491#[derive(Debug)]
7492pub enum BufferCollectionTokenEvent {
7493    #[non_exhaustive]
7494    _UnknownEvent {
7495        /// Ordinal of the event that was sent.
7496        ordinal: u64,
7497    },
7498}
7499
7500impl BufferCollectionTokenEvent {
7501    /// Decodes a message buffer as a [`BufferCollectionTokenEvent`].
7502    fn decode(
7503        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
7504    ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
7505        let (bytes, _handles) = buf.split_mut();
7506        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
7507        debug_assert_eq!(tx_header.tx_id, 0);
7508        match tx_header.ordinal {
7509            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
7510                Ok(BufferCollectionTokenEvent::_UnknownEvent { ordinal: tx_header.ordinal })
7511            }
7512            _ => Err(fidl::Error::UnknownOrdinal {
7513                ordinal: tx_header.ordinal,
7514                protocol_name:
7515                    <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
7516            }),
7517        }
7518    }
7519}
7520
7521/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionToken.
7522pub struct BufferCollectionTokenRequestStream {
7523    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
7524    is_terminated: bool,
7525}
7526
7527impl std::marker::Unpin for BufferCollectionTokenRequestStream {}
7528
7529impl futures::stream::FusedStream for BufferCollectionTokenRequestStream {
7530    fn is_terminated(&self) -> bool {
7531        self.is_terminated
7532    }
7533}
7534
7535impl fidl::endpoints::RequestStream for BufferCollectionTokenRequestStream {
7536    type Protocol = BufferCollectionTokenMarker;
7537    type ControlHandle = BufferCollectionTokenControlHandle;
7538
7539    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
7540        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
7541    }
7542
7543    fn control_handle(&self) -> Self::ControlHandle {
7544        BufferCollectionTokenControlHandle { inner: self.inner.clone() }
7545    }
7546
7547    fn into_inner(
7548        self,
7549    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
7550    {
7551        (self.inner, self.is_terminated)
7552    }
7553
7554    fn from_inner(
7555        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
7556        is_terminated: bool,
7557    ) -> Self {
7558        Self { inner, is_terminated }
7559    }
7560}
7561
7562impl futures::Stream for BufferCollectionTokenRequestStream {
7563    type Item = Result<BufferCollectionTokenRequest, fidl::Error>;
7564
7565    fn poll_next(
7566        mut self: std::pin::Pin<&mut Self>,
7567        cx: &mut std::task::Context<'_>,
7568    ) -> std::task::Poll<Option<Self::Item>> {
7569        let this = &mut *self;
7570        if this.inner.check_shutdown(cx) {
7571            this.is_terminated = true;
7572            return std::task::Poll::Ready(None);
7573        }
7574        if this.is_terminated {
7575            panic!("polled BufferCollectionTokenRequestStream after completion");
7576        }
7577        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
7578            |bytes, handles| {
7579                match this.inner.channel().read_etc(cx, bytes, handles) {
7580                    std::task::Poll::Ready(Ok(())) => {}
7581                    std::task::Poll::Pending => return std::task::Poll::Pending,
7582                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
7583                        this.is_terminated = true;
7584                        return std::task::Poll::Ready(None);
7585                    }
7586                    std::task::Poll::Ready(Err(e)) => {
7587                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
7588                            e.into(),
7589                        ))))
7590                    }
7591                }
7592
7593                // A message has been received from the channel
7594                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
7595
7596                std::task::Poll::Ready(Some(match header.ordinal {
7597                0x11ac2555cf575b54 => {
7598                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7599                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7600                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7601                    let control_handle = BufferCollectionTokenControlHandle {
7602                        inner: this.inner.clone(),
7603                    };
7604                    Ok(BufferCollectionTokenRequest::Sync {
7605                        responder: BufferCollectionTokenSyncResponder {
7606                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7607                            tx_id: header.tx_id,
7608                        },
7609                    })
7610                }
7611                0x6a5cae7d6d6e04c6 => {
7612                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7613                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7614                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7615                    let control_handle = BufferCollectionTokenControlHandle {
7616                        inner: this.inner.clone(),
7617                    };
7618                    Ok(BufferCollectionTokenRequest::Release {
7619                        control_handle,
7620                    })
7621                }
7622                0xb41f1624f48c1e9 => {
7623                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7624                    let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7625                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
7626                    let control_handle = BufferCollectionTokenControlHandle {
7627                        inner: this.inner.clone(),
7628                    };
7629                    Ok(BufferCollectionTokenRequest::SetName {payload: req,
7630                        control_handle,
7631                    })
7632                }
7633                0x5cde8914608d99b1 => {
7634                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7635                    let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7636                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
7637                    let control_handle = BufferCollectionTokenControlHandle {
7638                        inner: this.inner.clone(),
7639                    };
7640                    Ok(BufferCollectionTokenRequest::SetDebugClientInfo {payload: req,
7641                        control_handle,
7642                    })
7643                }
7644                0x716b0af13d5c0806 => {
7645                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7646                    let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7647                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
7648                    let control_handle = BufferCollectionTokenControlHandle {
7649                        inner: this.inner.clone(),
7650                    };
7651                    Ok(BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {payload: req,
7652                        control_handle,
7653                    })
7654                }
7655                0x5209c77415b4dfad => {
7656                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7657                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7658                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7659                    let control_handle = BufferCollectionTokenControlHandle {
7660                        inner: this.inner.clone(),
7661                    };
7662                    Ok(BufferCollectionTokenRequest::SetVerboseLogging {
7663                        control_handle,
7664                    })
7665                }
7666                0x5b3d0e51614df053 => {
7667                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7668                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7669                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7670                    let control_handle = BufferCollectionTokenControlHandle {
7671                        inner: this.inner.clone(),
7672                    };
7673                    Ok(BufferCollectionTokenRequest::GetNodeRef {
7674                        responder: BufferCollectionTokenGetNodeRefResponder {
7675                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7676                            tx_id: header.tx_id,
7677                        },
7678                    })
7679                }
7680                0x3a58e00157e0825 => {
7681                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7682                    let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7683                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
7684                    let control_handle = BufferCollectionTokenControlHandle {
7685                        inner: this.inner.clone(),
7686                    };
7687                    Ok(BufferCollectionTokenRequest::IsAlternateFor {payload: req,
7688                        responder: BufferCollectionTokenIsAlternateForResponder {
7689                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7690                            tx_id: header.tx_id,
7691                        },
7692                    })
7693                }
7694                0x77d19a494b78ba8c => {
7695                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7696                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7697                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7698                    let control_handle = BufferCollectionTokenControlHandle {
7699                        inner: this.inner.clone(),
7700                    };
7701                    Ok(BufferCollectionTokenRequest::GetBufferCollectionId {
7702                        responder: BufferCollectionTokenGetBufferCollectionIdResponder {
7703                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7704                            tx_id: header.tx_id,
7705                        },
7706                    })
7707                }
7708                0x22dd3ea514eeffe1 => {
7709                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7710                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7711                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7712                    let control_handle = BufferCollectionTokenControlHandle {
7713                        inner: this.inner.clone(),
7714                    };
7715                    Ok(BufferCollectionTokenRequest::SetWeak {
7716                        control_handle,
7717                    })
7718                }
7719                0x38a44fc4d7724be9 => {
7720                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7721                    let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7722                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
7723                    let control_handle = BufferCollectionTokenControlHandle {
7724                        inner: this.inner.clone(),
7725                    };
7726                    Ok(BufferCollectionTokenRequest::SetWeakOk {payload: req,
7727                        control_handle,
7728                    })
7729                }
7730                0x3f22f2a293d3cdac => {
7731                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7732                    let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7733                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
7734                    let control_handle = BufferCollectionTokenControlHandle {
7735                        inner: this.inner.clone(),
7736                    };
7737                    Ok(BufferCollectionTokenRequest::AttachNodeTracking {payload: req,
7738                        control_handle,
7739                    })
7740                }
7741                0x1c1af9919d1ca45c => {
7742                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7743                    let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7744                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateSyncRequest>(&header, _body_bytes, handles, &mut req)?;
7745                    let control_handle = BufferCollectionTokenControlHandle {
7746                        inner: this.inner.clone(),
7747                    };
7748                    Ok(BufferCollectionTokenRequest::DuplicateSync {payload: req,
7749                        responder: BufferCollectionTokenDuplicateSyncResponder {
7750                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7751                            tx_id: header.tx_id,
7752                        },
7753                    })
7754                }
7755                0x73e78f92ee7fb887 => {
7756                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7757                    let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7758                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateRequest>(&header, _body_bytes, handles, &mut req)?;
7759                    let control_handle = BufferCollectionTokenControlHandle {
7760                        inner: this.inner.clone(),
7761                    };
7762                    Ok(BufferCollectionTokenRequest::Duplicate {payload: req,
7763                        control_handle,
7764                    })
7765                }
7766                0x228acf979254df8b => {
7767                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7768                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7769                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7770                    let control_handle = BufferCollectionTokenControlHandle {
7771                        inner: this.inner.clone(),
7772                    };
7773                    Ok(BufferCollectionTokenRequest::SetDispensable {
7774                        control_handle,
7775                    })
7776                }
7777                0x30f8d48e77bd36f2 => {
7778                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7779                    let mut req = fidl::new_empty!(BufferCollectionTokenCreateBufferCollectionTokenGroupRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7780                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(&header, _body_bytes, handles, &mut req)?;
7781                    let control_handle = BufferCollectionTokenControlHandle {
7782                        inner: this.inner.clone(),
7783                    };
7784                    Ok(BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {payload: req,
7785                        control_handle,
7786                    })
7787                }
7788                _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
7789                    Ok(BufferCollectionTokenRequest::_UnknownMethod {
7790                        ordinal: header.ordinal,
7791                        control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
7792                        method_type: fidl::MethodType::OneWay,
7793                    })
7794                }
7795                _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
7796                    this.inner.send_framework_err(
7797                        fidl::encoding::FrameworkErr::UnknownMethod,
7798                        header.tx_id,
7799                        header.ordinal,
7800                        header.dynamic_flags(),
7801                        (bytes, handles),
7802                    )?;
7803                    Ok(BufferCollectionTokenRequest::_UnknownMethod {
7804                        ordinal: header.ordinal,
7805                        control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
7806                        method_type: fidl::MethodType::TwoWay,
7807                    })
7808                }
7809                _ => Err(fidl::Error::UnknownOrdinal {
7810                    ordinal: header.ordinal,
7811                    protocol_name: <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
7812                }),
7813            }))
7814            },
7815        )
7816    }
7817}
7818
7819/// A [`fuchsia.sysmem2/BufferCollectionToken`] is not a buffer collection, but
7820/// rather is a way to identify a specific potential shared buffer collection,
7821/// and a way to distribute that potential shared buffer collection to
7822/// additional participants prior to the buffer collection allocating any
7823/// buffers.
7824///
7825/// Epitaphs are not used in this protocol.
7826///
7827/// We use a channel for the `BufferCollectionToken` instead of a single
7828/// `eventpair` (pair) because this way we can detect error conditions like a
7829/// participant failing mid-create.
7830#[derive(Debug)]
7831pub enum BufferCollectionTokenRequest {
7832    /// Ensure that previous messages have been received server side. This is
7833    /// particularly useful after previous messages that created new tokens,
7834    /// because a token must be known to the sysmem server before sending the
7835    /// token to another participant.
7836    ///
7837    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
7838    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
7839    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
7840    /// to mitigate the possibility of a hostile/fake
7841    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
7842    /// Another way is to pass the token to
7843    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
7844    /// the token as part of exchanging it for a
7845    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
7846    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
7847    /// of stalling.
7848    ///
7849    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
7850    /// and then starting and completing a `Sync`, it's then safe to send the
7851    /// `BufferCollectionToken` client ends to other participants knowing the
7852    /// server will recognize the tokens when they're sent by the other
7853    /// participants to sysmem in a
7854    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
7855    /// efficient way to create tokens while avoiding unnecessary round trips.
7856    ///
7857    /// Other options include waiting for each
7858    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
7859    /// individually (using a separate call to `Sync` after each), or calling
7860    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
7861    /// converted to a `BufferCollection` via
7862    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
7863    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
7864    /// the sync step and can create multiple tokens at once.
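    ///
    /// A minimal client-side sketch of the duplicate-then-sync pattern. This
    /// is illustrative only: it assumes an async context, an existing
    /// `BufferCollectionTokenProxy` named `token`, and that
    /// `fidl::endpoints::create_endpoints` returns the endpoint pair directly
    /// (exact signatures can vary by `fidl` crate version).
    ///
    /// ```ignore
    /// // Create a child token locally via the one-way Duplicate...
    /// let (child_client, child_server) =
    ///     fidl::endpoints::create_endpoints::<BufferCollectionTokenMarker>();
    /// token.duplicate(BufferCollectionTokenDuplicateRequest {
    ///     rights_attenuation_mask: Some(fidl::Rights::SAME_RIGHTS),
    ///     token_request: Some(child_server),
    ///     ..Default::default()
    /// })?;
    /// // ...then Sync so the server knows about the new token before its
    /// // client end is sent to another participant.
    /// token.sync().await?;
    /// // Now `child_client` can safely be handed to the other participant.
    /// ```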
7865    Sync { responder: BufferCollectionTokenSyncResponder },
7866    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
7867    ///
7868    /// Normally a participant will convert a `BufferCollectionToken` into a
7869    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
7870    /// `Release` via the token (and then close the channel immediately or
7871    /// shortly later in response to server closing the server end), which
7872    /// avoids causing buffer collection failure. Without a prior `Release`,
7873    /// closing the `BufferCollectionToken` client end will cause buffer
7874    /// collection failure.
7875    ///
7876    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
7877    ///
7878    /// By default the server handles unexpected closure of a
7879    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
7880    /// first) by failing the buffer collection. Partly this is to expedite
7881    /// closing VMO handles to reclaim memory when any participant fails. If a
7882    /// participant would like to cleanly close a `BufferCollection` without
7883    /// causing buffer collection failure, the participant can send `Release`
7884    /// before closing the `BufferCollection` client end. The `Release` can
7885    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
7886    /// buffer collection won't require constraints from this node in order to
7887    /// allocate. If after `SetConstraints`, the constraints are retained and
7888    /// aggregated, despite the lack of `BufferCollection` connection at the
7889    /// time of constraints aggregation.
7890    ///
7891    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
7892    ///
7893    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
7894    /// end (without `Release` first) will trigger failure of the buffer
7895    /// collection. To close a `BufferCollectionTokenGroup` channel without
7896    /// failing the buffer collection, ensure that `AllChildrenPresent` has been
7897    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
7898    /// client end.
7899    ///
7900    /// If `Release` occurs before
7901    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
7902    /// buffer collection will fail (triggered by reception of `Release` without
7903    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
7904    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
7905    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
7906    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
7907    /// close requires `AllChildrenPresent` (if not already sent), then
7908    /// `Release`, then close client end.
7909    ///
7910    /// If `Release` occurs after `AllChildrenPresent`, the children and all
7911    /// their constraints remain intact (just as they would if the
7912    /// `BufferCollectionTokenGroup` channel had remained open), and the client
7913    /// end close doesn't trigger buffer collection failure.
7914    ///
7915    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
7916    ///
7917    /// For brevity, the per-channel-protocol paragraphs above ignore the
7918    /// separate failure domain created by
7919    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
7920    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
7921    /// unexpectedly closes (without `Release` first) and that client end is
7922    /// under a failure domain, instead of failing the whole buffer collection,
7923    /// the failure domain is failed, but the buffer collection itself is
7924    /// isolated from failure of the failure domain. Such failure domains can be
7925    /// nested, in which case only the inner-most failure domain in which the
7926    /// `Node` resides fails.
7927    Release { control_handle: BufferCollectionTokenControlHandle },
7928    /// Set a name for VMOs in this buffer collection.
7929    ///
7930    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the VMO itself
7931    /// will be truncated to fit. The name of the VMO will be suffixed with the
7932    /// buffer index within the collection (if the suffix fits within
7933    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
7934    /// listed in the inspect data.
7935    ///
7936    /// The name only affects VMOs allocated after the name is set; this call
7937    /// does not rename existing VMOs. If multiple clients set different names
7938    /// then the larger priority value will win. Setting a new name with the
7939    /// same priority as a prior name doesn't change the name.
7940    ///
7941    /// All table fields are currently required.
7942    ///
7943    /// + request `priority` The name is only set if this is the first `SetName`
7944    ///   or if `priority` is greater than any previous `priority` value in
7945    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
7946    /// + request `name` The name for VMOs created under this buffer collection.
7947    SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenControlHandle },
7948    /// Set information about the current client that can be used by sysmem to
7949    /// help diagnose leaking memory and allocation stalls waiting for a
7950    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
7951    ///
7952    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
7953    /// `Node`(s) derived from this `Node`, unless overridden by
7954    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
7955    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
7956    ///
7957    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
7958    /// `Allocator` is the most efficient way to ensure that all
7959    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
7960    /// set, and is also more efficient than separately sending the same debug
7961    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
7962    /// created [`fuchsia.sysmem2/Node`].
7963    ///
7964    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
7965    /// indicate which client is closing their channel first, leading to subtree
7966    /// failure (which can be normal if the purpose of the subtree is over, but
7967    /// if happening earlier than expected, the client-channel-specific name can
7968    /// help diagnose where the failure is first coming from, from sysmem's
7969    /// point of view).
7970    ///
7971    /// All table fields are currently required.
7972    ///
7973    /// + request `name` This can be an arbitrary string, but the current
7974    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
7975    /// + request `id` This can be an arbitrary id, but the current process ID
7976    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
7977    SetDebugClientInfo {
7978        payload: NodeSetDebugClientInfoRequest,
7979        control_handle: BufferCollectionTokenControlHandle,
7980    },
7981    /// Sysmem logs a warning if sysmem hasn't seen
7982    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
7983    /// within 5 seconds after creation of a new collection.
7984    ///
7985    /// Clients can call this method to change when the log is printed. If
7986    /// multiple clients set the deadline, it's unspecified which deadline will
7987    /// take effect.
7988    ///
7989    /// In most cases the default works well.
7990    ///
7991    /// All table fields are currently required.
7992    ///
7993    /// + request `deadline` The time at which sysmem will start trying to log
7994    ///   the warning, unless all constraints are with sysmem by then.
7995    SetDebugTimeoutLogDeadline {
7996        payload: NodeSetDebugTimeoutLogDeadlineRequest,
7997        control_handle: BufferCollectionTokenControlHandle,
7998    },
7999    /// This enables verbose logging for the buffer collection.
8000    ///
8001    /// Verbose logging includes constraints set via
8002    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
8003    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
8004    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
8005    /// the tree of `Node`(s).
8006    ///
8007    /// Normally sysmem prints only a single line complaint when aggregation
8008    /// fails, with just the specific detailed reason that aggregation failed,
8009    /// with little surrounding context.  While this is often enough to diagnose
8010    /// a problem if only a small change was made and everything was working
8011    /// before the small change, it's often not particularly helpful for getting
8012    /// a new buffer collection to work for the first time.  Especially with
8013    /// more complex trees of nodes, involving things like
8014    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
8015    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
8016    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
8017    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
8018    /// looks like and why it's failing a logical allocation, or why a tree or
8019    /// subtree is failing sooner than expected.
8020    ///
8021    /// The intent of the extra logging is to be acceptable from a performance
8022    /// point of view, under the assumption that verbose logging is only enabled
8023    /// on a low number of buffer collections. If we're not tracking down a bug,
8024    /// we shouldn't send this message.
8025    SetVerboseLogging { control_handle: BufferCollectionTokenControlHandle },
8026    /// This gets a handle that can be used as a parameter to
8027    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
8028    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
8029    /// client obtained this handle from this `Node`.
8030    ///
8031    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
8032    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
8033    /// despite the two calls typically being on different channels.
8034    ///
8035    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
8036    ///
8037    /// All table fields are currently required.
8038    ///
8039    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
8040    ///   different `Node` channel, to prove that the client obtained the handle
8041    ///   from this `Node`.
8042    GetNodeRef { responder: BufferCollectionTokenGetNodeRefResponder },
8043    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
8044    /// rooted at a different child token of a common parent
8045    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
8046    /// passed-in `node_ref`.
8047    ///
8048    /// This call is for assisting with admission control de-duplication, and
8049    /// with debugging.
8050    ///
8051    /// The `node_ref` must be obtained using
8052    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
8053    ///
8054    /// The `node_ref` can be a duplicated handle; it's not necessary to call
8055    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
8056    ///
8057    /// If a calling token may not actually be a valid token at all due to a
8058    /// potentially hostile/untrusted provider of the token, call
8059    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8060    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
8061    /// never responds due to a calling token not being a real token (not really
8062    /// talking to sysmem).  Another option is to call
8063    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
8064    /// which also validates the token along with converting it to a
8065    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
8066    ///
8067    /// All table fields are currently required.
8068    ///
8069    /// - response `is_alternate`
8070    ///   - true: The first parent node in common between the calling node and
8071    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
8072    ///     that the calling `Node` and the `node_ref` `Node` will not have both
8073    ///     their constraints apply - rather sysmem will choose one or the other
8074    ///     of the constraints - never both.  This is because only one child of
8075    ///     a `BufferCollectionTokenGroup` is selected during logical
8076    ///     allocation, with only that one child's subtree contributing to
8077    ///     constraints aggregation.
8078    ///   - false: The first parent node in common between the calling `Node`
8079    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
8080    ///     Currently, this means the first parent node in common is a
8081    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
8082    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
8083    ///     `Node` may have both their constraints apply during constraints
8084    ///     aggregation of the logical allocation, if both `Node`(s) are
8085    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
8086    ///     this case, there is no `BufferCollectionTokenGroup` that will
8087    ///     directly prevent the two `Node`(s) from both being selected and
8088    ///     their constraints both aggregated, but even when false, one or both
8089    ///     `Node`(s) may still be eliminated from consideration if one or both
8090    ///     `Node`(s) has a direct or indirect parent
8091    ///     `BufferCollectionTokenGroup` which selects a child subtree other
8092    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
8093    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
8094    ///   associated with the same buffer collection as the calling `Node`.
8095    ///   Another reason for this error is if the `node_ref` is an
8096    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
8097    ///   a real `node_ref` obtained from `GetNodeRef`.
8098    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
8099    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
8100    ///   the needed rights expected on a real `node_ref`.
8101    /// * No other failing status codes are returned by this call.  However,
8102    ///   sysmem may add additional codes in future, so the client should have
8103    ///   sensible default handling for any failing status code.
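    ///
    /// A hedged sketch of one possible call sequence. It assumes an async
    /// context, two proxies `token` and `other_node` connected to `Node`(s)
    /// of the same buffer collection, and the request/response field names
    /// described above (`node_ref`, `is_alternate`).
    ///
    /// ```ignore
    /// // Obtain a proof-of-origin handle from the other Node.
    /// let node_ref = other_node.get_node_ref().await?.node_ref.unwrap();
    /// // Ask whether the two Nodes are alternates under a common group.
    /// let result = token
    ///     .is_alternate_for(NodeIsAlternateForRequest {
    ///         node_ref: Some(node_ref),
    ///         ..Default::default()
    ///     })
    ///     .await?;
    /// // `result` is Ok(response) carrying `response.is_alternate`, or
    /// // Err(..) carrying a fuchsia.sysmem2 Error such as NOT_FOUND.
    /// ```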
8104    IsAlternateFor {
8105        payload: NodeIsAlternateForRequest,
8106        responder: BufferCollectionTokenIsAlternateForResponder,
8107    },
8108    /// Get the buffer collection ID. This ID is also available from
8109    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
8110    /// within the collection).
8111    ///
8112    /// This call is mainly useful in situations where we can't convey a
8113    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
8114    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
8115    /// handle, which can be joined back up with a `BufferCollection` client end
8116    /// that was created via a different path. Prefer to convey a
8117    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
8118    ///
8119    /// Trusting a `buffer_collection_id` value from a source other than sysmem
8120    /// is analogous to trusting a koid value from a source other than zircon.
8121    /// Both should be avoided unless really necessary, and both require
8122    /// caution. In some situations it may be reasonable to refer to a
8123    /// pre-established `BufferCollection` by `buffer_collection_id` via a
8124    /// protocol for efficiency reasons, but an incoming value purporting to be
8125    /// a `buffer_collection_id` is not sufficient alone to justify granting the
8126    /// sender of the `buffer_collection_id` any capability. The sender must
8127    /// first prove to a receiver that the sender has/had a VMO or has/had a
8128    /// `BufferCollectionToken` to the same collection by sending a handle that
8129    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
8130    /// `buffer_collection_id` value. The receiver should take care to avoid
8131    /// assuming that a sender had a `BufferCollectionToken` in cases where the
8132    /// sender has only proven that the sender had a VMO.
8133    ///
8134    /// - response `buffer_collection_id` This ID is unique per buffer
8135    ///   collection per boot. Each buffer is uniquely identified by the
8136    ///   `buffer_collection_id` and `buffer_index` together.
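    ///
    /// A minimal sketch, assuming an async context and an existing proxy
    /// named `token`; the response field name follows the documentation
    /// above.
    ///
    /// ```ignore
    /// let id = token
    ///     .get_buffer_collection_id()
    ///     .await?
    ///     .buffer_collection_id
    ///     .unwrap();
    /// // `id` plus a `buffer_index` uniquely identifies a buffer this boot.
    /// ```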
8137    GetBufferCollectionId { responder: BufferCollectionTokenGetBufferCollectionIdResponder },
8138    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
8139    /// created after this message to weak, which means that a client's `Node`
8140    /// client end (or a child created after this message) is not alone
8141    /// sufficient to keep allocated VMOs alive.
8142    ///
8143    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
8144    /// `close_weak_asap`.
8145    ///
8146    /// This message is only permitted before the `Node` becomes ready for
8147    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
8148    ///   * `BufferCollectionToken`: any time
8149    ///   * `BufferCollection`: before `SetConstraints`
8150    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
8151    ///
8152    /// Currently, no conversion from strong `Node` to weak `Node` after ready
8153    /// for allocation is provided, but a client can simulate that by creating
8154    /// an additional `Node` before allocation and setting that additional
8155    /// `Node` to weak, and then potentially at some point later sending
8156    /// `Release` and closing the client end of the client's strong `Node`, but
8157    /// keeping the client's weak `Node`.
8158    ///
8159    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
8160    /// collection failure (all `Node` client end(s) will see
8161    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
8162    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
8163    /// this situation until all `Node`(s) are ready for allocation. For initial
8164    /// allocation to succeed, at least one strong `Node` is required to exist
8165    /// at allocation time, but after that client receives VMO handles, that
8166    /// client can `BufferCollection.Release` and close the client end without
8167    /// causing this type of failure.
8168    ///
8169    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
8170    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
8171    /// separately as appropriate.
8172    SetWeak { control_handle: BufferCollectionTokenControlHandle },
8173    /// This indicates to sysmem that the client is prepared to pay attention to
8174    /// `close_weak_asap`.
8175    ///
8176    /// If sent, this message must be before
8177    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
8178    ///
8179    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
8180    /// send this message before `WaitForAllBuffersAllocated`, or a parent
8181    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
8182    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
8183    /// trigger buffer collection failure.
8184    ///
8185    /// This message is necessary because weak sysmem VMOs have not always been
8186    /// a thing, so older clients are not aware of the need to pay attention to
8187    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
8188    /// sysmem weak VMO handles asap. By having this message and requiring
8189    /// participants to indicate their acceptance of this aspect of the overall
8190    /// protocol, we avoid situations where an older client is delivered a weak
8191    /// VMO without any way for sysmem to get that VMO to close quickly later
8192    /// (and on a per-buffer basis).
8193    ///
8194    /// A participant that doesn't handle `close_weak_asap` and also doesn't
8195    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
8196    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
8197    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
8198    /// same participant has a child/delegate which does retrieve VMOs, that
8199    /// child/delegate will need to send `SetWeakOk` before
8200    /// `WaitForAllBuffersAllocated`.
8201    ///
8202    /// + request `for_child_nodes_also` If present and true, this means direct
8203    ///   child nodes of this node created after this message plus all
8204    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
8205    ///   those nodes. Any child node of this node that was created before this
8206    ///   message is not included. This setting is "sticky" in the sense that a
8207    ///   subsequent `SetWeakOk` without this bool set to true does not reset
8208    ///   the server-side bool. If this creates a problem for a participant, a
8209    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
8210    ///   tokens instead, as appropriate. A participant should only set
8211    ///   `for_child_nodes_also` true if the participant can really promise to
8212    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
8213    ///   weak VMO handles held by participants holding the corresponding child
8214    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
8215    ///   which are using sysmem(1) can be weak, despite the clients of those
8216    ///   sysmem(1) `Node`(s) not having any direct way to `SetWeakOk` or any
8217    ///   direct way to find out about `close_weak_asap`. This only applies to
8218    ///   descendants of this `Node` which are using sysmem(1), not to this
8219    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
8220    ///   token, which will fail allocation unless an ancestor of this `Node`
8221    ///   specified `for_child_nodes_also` true.
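    ///
    /// A minimal sketch of opting a subtree into weak-VMO handling, assuming
    /// an existing proxy named `token`; the field name follows the
    /// documentation above.
    ///
    /// ```ignore
    /// // Promise, for this Node and for child Nodes created later, to honor
    /// // close_weak_asap for any weak VMO handles that get handed out.
    /// token.set_weak_ok(NodeSetWeakOkRequest {
    ///     for_child_nodes_also: Some(true),
    ///     ..Default::default()
    /// })?;
    /// ```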
8222    SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionTokenControlHandle },
8223    /// The `server_end` will be closed after this `Node` and any child nodes
8224    /// have released their buffer counts, making those counts available for
8225    /// reservation by a different `Node` via
8226    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
8227    ///
8228    /// The `Node` buffer counts may not be released until the entire tree of
8229    /// `Node`(s) is closed or failed, because
8230    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
8231    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
8232    /// `Node` buffer counts remain reserved until the orphaned node is later
8233    /// cleaned up.
8234    ///
8235    /// If the `Node` exceeds a fairly large number of attached eventpair server
8236    /// ends, a log message will indicate this and the `Node` (and the
8237    /// appropriate sub-tree) will fail.
8238    ///
8239    /// The `server_end` will remain open when
8240    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
8241    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
8242    /// [`fuchsia.sysmem2/BufferCollection`].
8243    ///
8244    /// This message can also be used with a
8245    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
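    ///
    /// A hedged sketch, assuming an existing proxy named `token`, the field
    /// name described above, and a recent `zx`/`fidl` crate where
    /// `EventPair::create()` returns the pair directly.
    ///
    /// ```ignore
    /// let (tracking_client, tracking_server) = fidl::EventPair::create();
    /// token.attach_node_tracking(NodeAttachNodeTrackingRequest {
    ///     server_end: Some(tracking_server),
    ///     ..Default::default()
    /// })?;
    /// // Later, ZX_EVENTPAIR_PEER_CLOSED on `tracking_client` indicates that
    /// // this Node (and its children) released their buffer counts.
    /// ```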
8246    AttachNodeTracking {
8247        payload: NodeAttachNodeTrackingRequest,
8248        control_handle: BufferCollectionTokenControlHandle,
8249    },
8250    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
8251    /// one, referring to the same buffer collection.
8252    ///
8253    /// The created tokens are children of this token in the
8254    /// [`fuchsia.sysmem2/Node`] hierarchy.
8255    ///
8256    /// This method can be used to add more participants, by transferring the
8257    /// newly created tokens to additional participants.
8258    ///
8259    /// A new token will be returned for each entry in the
8260    /// `rights_attenuation_masks` array.
8261    ///
8262    /// If the called token may not actually be a valid token due to a
8263    /// potentially hostile/untrusted provider of the token, consider using
8264    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8265    /// instead of potentially getting stuck indefinitely if
8266    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
8267    /// due to the calling token not being a real token.
8268    ///
8269    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
8270    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
8271    /// method, because the sync step is included in this call, at the cost of a
8272    /// round trip during this call.
8273    ///
8274    /// All tokens must be turned in to sysmem via
8275    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8276    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8277    /// successfully allocate buffers (or to logically allocate buffers in the
8278    /// case of subtrees involving
8279    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]).
8280    ///
8281    /// All table fields are currently required.
8282    ///
8283    /// + request `rights_attenuation_mask` In each entry of
8284    ///   `rights_attenuation_masks`, rights bits that are zero will be absent
8285    ///   in the buffer VMO rights obtainable via the corresponding returned
8286    ///   token. This allows an initiator or intermediary participant to
8287    ///   attenuate the rights available to a participant. This does not allow a
8288    ///   participant to gain rights that the participant doesn't already have.
8289    ///   The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
8290    ///   attenuation should be applied.
8291    /// - response `tokens` The client ends of each newly created token.
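    ///
    /// A hedged sketch, assuming an async context, an existing proxy named
    /// `token`, and that the request table's vector field is named
    /// `rights_attenuation_masks` as described above.
    ///
    /// ```ignore
    /// let response = token
    ///     .duplicate_sync(&BufferCollectionTokenDuplicateSyncRequest {
    ///         rights_attenuation_masks: Some(vec![fidl::Rights::SAME_RIGHTS]),
    ///         ..Default::default()
    ///     })
    ///     .await?;
    /// // One new child token client end per requested mask; these are safe
    /// // to send to other participants without a separate Sync.
    /// let tokens = response.tokens.unwrap();
    /// ```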
8292    DuplicateSync {
8293        payload: BufferCollectionTokenDuplicateSyncRequest,
8294        responder: BufferCollectionTokenDuplicateSyncResponder,
8295    },
8296    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
8297    /// one, referring to the same buffer collection.
8298    ///
8299    /// The created token is a child of this token in the
8300    /// [`fuchsia.sysmem2/Node`] hierarchy.
8301    ///
8302    /// This method can be used to add a participant, by transferring the newly
8303    /// created token to another participant.
8304    ///
8305    /// This one-way message can be used instead of the two-way
8306    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
8307    /// performance-sensitive cases where it would be undesirable to wait for
8308    /// sysmem to respond to
8309    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
8310    /// client code isn't structured to make it easy to duplicate all the needed
8311    /// tokens at once.
8312    ///
8313    /// After sending one or more `Duplicate` messages, and before sending the
8314    /// newly created child tokens to other participants (or to other
8315    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
8316    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
8317    /// `Sync` call can be made on the token, or on the `BufferCollection`
8318    /// obtained by passing this token to `BindSharedCollection`.  Either will
8319    /// ensure that the server knows about the tokens created via `Duplicate`
8320    /// before the other participant sends the token to the server via separate
8321    /// `Allocator` channel.
8322    ///
8323    /// All tokens must be turned in via
8324    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8325    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8326    /// successfully allocate buffers.
8327    ///
8328    /// All table fields are currently required.
8329    ///
8330    /// + request `rights_attenuation_mask` The rights bits that are zero in
8331    ///   this mask will be absent in the buffer VMO rights obtainable via the
8332    ///   client end of `token_request`. This allows an initiator or
8333    ///   intermediary participant to attenuate the rights available to a
8334    ///   delegate participant. This does not allow a participant to gain rights
8335    ///   that the participant doesn't already have. The value
8336    ///   `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
8337    ///   should be applied.
8338    ///   + These values for rights_attenuation_mask result in no attenuation:
8339    ///     + `ZX_RIGHT_SAME_RIGHTS` (preferred)
8340    ///     + 0xFFFFFFFF (this is reasonable when an attenuation mask is
8341    ///       computed)
8342    ///     + 0 (deprecated - do not use 0 - an ERROR will go to the log)
8343    /// + request `token_request` is the server end of a `BufferCollectionToken`
8344    ///   channel. The client end of this channel acts as another participant in
8345    ///   the shared buffer collection.
8346    Duplicate {
8347        payload: BufferCollectionTokenDuplicateRequest,
8348        control_handle: BufferCollectionTokenControlHandle,
8349    },
8350    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
8351    ///
8352    /// When the `BufferCollectionToken` is converted to a
8353    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
8354    /// the `BufferCollection` also.
8355    ///
8356    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
8357    /// client end without having sent
8358    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
8359    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
8360    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
8361    /// to the root `Node`, which fails the whole buffer collection. In
8362    /// contrast, a dispensable `Node` can fail after buffers are allocated
8363    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
8364    /// hierarchy.
8365    ///
8366    /// The dispensable `Node` participates in constraints aggregation along
8367    /// with its parent before buffer allocation. If the dispensable `Node`
8368    /// fails before buffers are allocated, the failure propagates to the
8369    /// dispensable `Node`'s parent.
8370    ///
8371    /// After buffers are allocated, failure of the dispensable `Node` (or any
8372    /// child of the dispensable `Node`) does not propagate to the dispensable
8373    /// `Node`'s parent. Failure does propagate from a normal child of a
8374    /// dispensable `Node` to the dispensable `Node`.  Failure of a child is
8375    /// blocked from reaching its parent if the child is attached using
8376    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
8377    /// dispensable and the failure occurred after allocation.
8378    ///
8379    /// A dispensable `Node` can be used in cases where a participant needs to
8380    /// provide constraints, but after buffers are allocated, the participant
8381    /// can fail without causing buffer collection failure from the parent
8382    /// `Node`'s point of view.
8383    ///
8384    /// In contrast, `BufferCollection.AttachToken` can be used to create a
8385    /// `BufferCollectionToken` which does not participate in constraints
8386    /// aggregation with its parent `Node`, and whose failure at any time does
8387    /// not propagate to its parent `Node`, and whose potential delay providing
8388    /// constraints does not prevent the parent `Node` from completing its
8389    /// buffer allocation.
8390    ///
8391    /// An initiator (creator of the root `Node` using
8392    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
8393    /// scenarios choose to initially use a dispensable `Node` for a first
8394    /// instance of a participant, and then later if the first instance of that
8395    /// participant fails, a new second instance of that participant may be given
8396    /// a `BufferCollectionToken` created with `AttachToken`.
8397    ///
8398    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
8399    /// shortly before sending the dispensable `BufferCollectionToken` to a
8400    /// delegate participant. Because `SetDispensable` prevents propagation of
8401    /// child `Node` failure to parent `Node`(s), if the client was relying on
8402    /// noticing child failure via failure of the parent `Node` retained by the
8403    /// client, the client may instead need to notice failure via other means.
8404    /// If other means aren't available/convenient, the client can instead
8405    /// retain the dispensable `Node` and create a child `Node` under that to
8406    /// send to the delegate participant, retaining this `Node` in order to
8407    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
8408    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
8409    /// (e.g. starting a new instance of the delegate participant and handing it
8410    /// a `BufferCollectionToken` created using
8411    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
8412    /// and clean up in a client-specific way).
8413    ///
8414    /// While it is possible (and potentially useful) to `SetDispensable` on a
8415    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
8416    /// to later replace a failed dispensable `Node` that was a direct child of
8417    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
8418    /// (since there's no `AttachToken` on a group). Instead, to enable
8419    /// `AttachToken` replacement in this case, create an additional
8420    /// non-dispensable token that's a direct child of the group and make the
8421    /// existing dispensable token a child of the additional token.  This way,
8422    /// the additional token that is a direct child of the group has
8423    /// `BufferCollection.AttachToken` which can be used to replace the failed
8424    /// dispensable token.
8425    ///
8426    /// `SetDispensable` on an already-dispensable token is idempotent.
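    ///
    /// A minimal server-side sketch (illustrative only, not part of the
    /// generated API) of reacting to this request from a
    /// `BufferCollectionTokenRequestStream`, assuming a stream variable named
    /// `stream`:
    ///
    /// ```ignore
    /// use futures::TryStreamExt;
    ///
    /// while let Some(request) = stream.try_next().await? {
    ///     match request {
    ///         BufferCollectionTokenRequest::SetDispensable { control_handle: _ } => {
    ///             // Hypothetical bookkeeping: record that this token's Node is
    ///             // dispensable; the details depend on the server implementation.
    ///         }
    ///         _ => {}
    ///     }
    /// }
    /// ```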
8427    SetDispensable { control_handle: BufferCollectionTokenControlHandle },
8428    /// Create a logical OR among a set of tokens, called a
8429    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
8430    ///
8431    /// Most sysmem clients and many participants don't need to care about this
8432    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
8433    /// a participant wants to attempt to include one set of delegate
8434    /// participants, but if constraints don't combine successfully that way,
8435    /// fall back to a different (possibly overlapping) set of delegate
8436    /// participants, and/or fall back to a less demanding strategy (in terms of
8437    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
8438    /// across all involved delegate participants). In such cases, a
8439    /// `BufferCollectionTokenGroup` is useful.
8440    ///
8441    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
8442    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s).  The child tokens
8443    /// which are not selected during aggregation will fail (close), which a
8444    /// potential participant should notice when their `BufferCollection`
8445    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
8446    /// clean up the speculative usage that didn't end up happening (this is
8447    /// similar to a normal `BufferCollection` server end closing on failure to
8448    /// allocate a logical buffer collection or later async failure of a buffer
8449    /// collection).
8450    ///
8451    /// See comments on protocol `BufferCollectionTokenGroup`.
8452    ///
8453    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
8454    /// applied to the whole group can be achieved with a
8455    /// `BufferCollectionToken` for this purpose as a direct parent of the
8456    /// `BufferCollectionTokenGroup`.
8457    ///
8458    /// All table fields are currently required.
8459    ///
8460    /// + request `group_request` The server end of a
8461    ///   `BufferCollectionTokenGroup` channel to be served by sysmem.
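    ///
    /// A minimal server-side sketch (illustrative only) of taking the group's
    /// server end out of the request payload, assuming a received request value
    /// named `request`:
    ///
    /// ```ignore
    /// if let BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {
    ///     payload,
    ///     control_handle: _,
    /// } = request
    /// {
    ///     // `group_request` is the server end described above; a real server
    ///     // would start serving the BufferCollectionTokenGroup protocol on it.
    ///     let _group_server_end = payload.group_request;
    /// }
    /// ```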
8462    CreateBufferCollectionTokenGroup {
8463        payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8464        control_handle: BufferCollectionTokenControlHandle,
8465    },
8466    /// An interaction was received which does not match any known method.
8467    #[non_exhaustive]
8468    _UnknownMethod {
8469        /// Ordinal of the method that was called.
8470        ordinal: u64,
8471        control_handle: BufferCollectionTokenControlHandle,
8472        method_type: fidl::MethodType,
8473    },
8474}
8475
8476impl BufferCollectionTokenRequest {
8477    #[allow(irrefutable_let_patterns)]
8478    pub fn into_sync(self) -> Option<(BufferCollectionTokenSyncResponder)> {
8479        if let BufferCollectionTokenRequest::Sync { responder } = self {
8480            Some((responder))
8481        } else {
8482            None
8483        }
8484    }
8485
8486    #[allow(irrefutable_let_patterns)]
8487    pub fn into_release(self) -> Option<(BufferCollectionTokenControlHandle)> {
8488        if let BufferCollectionTokenRequest::Release { control_handle } = self {
8489            Some((control_handle))
8490        } else {
8491            None
8492        }
8493    }
8494
8495    #[allow(irrefutable_let_patterns)]
8496    pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionTokenControlHandle)> {
8497        if let BufferCollectionTokenRequest::SetName { payload, control_handle } = self {
8498            Some((payload, control_handle))
8499        } else {
8500            None
8501        }
8502    }
8503
8504    #[allow(irrefutable_let_patterns)]
8505    pub fn into_set_debug_client_info(
8506        self,
8507    ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenControlHandle)> {
8508        if let BufferCollectionTokenRequest::SetDebugClientInfo { payload, control_handle } = self {
8509            Some((payload, control_handle))
8510        } else {
8511            None
8512        }
8513    }
8514
8515    #[allow(irrefutable_let_patterns)]
8516    pub fn into_set_debug_timeout_log_deadline(
8517        self,
8518    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenControlHandle)> {
8519        if let BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {
8520            payload,
8521            control_handle,
8522        } = self
8523        {
8524            Some((payload, control_handle))
8525        } else {
8526            None
8527        }
8528    }
8529
8530    #[allow(irrefutable_let_patterns)]
8531    pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenControlHandle)> {
8532        if let BufferCollectionTokenRequest::SetVerboseLogging { control_handle } = self {
8533            Some((control_handle))
8534        } else {
8535            None
8536        }
8537    }
8538
8539    #[allow(irrefutable_let_patterns)]
8540    pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGetNodeRefResponder)> {
8541        if let BufferCollectionTokenRequest::GetNodeRef { responder } = self {
8542            Some((responder))
8543        } else {
8544            None
8545        }
8546    }
8547
8548    #[allow(irrefutable_let_patterns)]
8549    pub fn into_is_alternate_for(
8550        self,
8551    ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenIsAlternateForResponder)> {
8552        if let BufferCollectionTokenRequest::IsAlternateFor { payload, responder } = self {
8553            Some((payload, responder))
8554        } else {
8555            None
8556        }
8557    }
8558
8559    #[allow(irrefutable_let_patterns)]
8560    pub fn into_get_buffer_collection_id(
8561        self,
8562    ) -> Option<(BufferCollectionTokenGetBufferCollectionIdResponder)> {
8563        if let BufferCollectionTokenRequest::GetBufferCollectionId { responder } = self {
8564            Some((responder))
8565        } else {
8566            None
8567        }
8568    }
8569
8570    #[allow(irrefutable_let_patterns)]
8571    pub fn into_set_weak(self) -> Option<(BufferCollectionTokenControlHandle)> {
8572        if let BufferCollectionTokenRequest::SetWeak { control_handle } = self {
8573            Some((control_handle))
8574        } else {
8575            None
8576        }
8577    }
8578
8579    #[allow(irrefutable_let_patterns)]
8580    pub fn into_set_weak_ok(
8581        self,
8582    ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenControlHandle)> {
8583        if let BufferCollectionTokenRequest::SetWeakOk { payload, control_handle } = self {
8584            Some((payload, control_handle))
8585        } else {
8586            None
8587        }
8588    }
8589
8590    #[allow(irrefutable_let_patterns)]
8591    pub fn into_attach_node_tracking(
8592        self,
8593    ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenControlHandle)> {
8594        if let BufferCollectionTokenRequest::AttachNodeTracking { payload, control_handle } = self {
8595            Some((payload, control_handle))
8596        } else {
8597            None
8598        }
8599    }
8600
8601    #[allow(irrefutable_let_patterns)]
8602    pub fn into_duplicate_sync(
8603        self,
8604    ) -> Option<(
8605        BufferCollectionTokenDuplicateSyncRequest,
8606        BufferCollectionTokenDuplicateSyncResponder,
8607    )> {
8608        if let BufferCollectionTokenRequest::DuplicateSync { payload, responder } = self {
8609            Some((payload, responder))
8610        } else {
8611            None
8612        }
8613    }
8614
8615    #[allow(irrefutable_let_patterns)]
8616    pub fn into_duplicate(
8617        self,
8618    ) -> Option<(BufferCollectionTokenDuplicateRequest, BufferCollectionTokenControlHandle)> {
8619        if let BufferCollectionTokenRequest::Duplicate { payload, control_handle } = self {
8620            Some((payload, control_handle))
8621        } else {
8622            None
8623        }
8624    }
8625
8626    #[allow(irrefutable_let_patterns)]
8627    pub fn into_set_dispensable(self) -> Option<(BufferCollectionTokenControlHandle)> {
8628        if let BufferCollectionTokenRequest::SetDispensable { control_handle } = self {
8629            Some((control_handle))
8630        } else {
8631            None
8632        }
8633    }
8634
8635    #[allow(irrefutable_let_patterns)]
8636    pub fn into_create_buffer_collection_token_group(
8637        self,
8638    ) -> Option<(
8639        BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8640        BufferCollectionTokenControlHandle,
8641    )> {
8642        if let BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {
8643            payload,
8644            control_handle,
8645        } = self
8646        {
8647            Some((payload, control_handle))
8648        } else {
8649            None
8650        }
8651    }
8652
8653    /// Name of the method defined in FIDL
8654    pub fn method_name(&self) -> &'static str {
8655        match *self {
8656            BufferCollectionTokenRequest::Sync { .. } => "sync",
8657            BufferCollectionTokenRequest::Release { .. } => "release",
8658            BufferCollectionTokenRequest::SetName { .. } => "set_name",
8659            BufferCollectionTokenRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
8660            BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline { .. } => {
8661                "set_debug_timeout_log_deadline"
8662            }
8663            BufferCollectionTokenRequest::SetVerboseLogging { .. } => "set_verbose_logging",
8664            BufferCollectionTokenRequest::GetNodeRef { .. } => "get_node_ref",
8665            BufferCollectionTokenRequest::IsAlternateFor { .. } => "is_alternate_for",
8666            BufferCollectionTokenRequest::GetBufferCollectionId { .. } => {
8667                "get_buffer_collection_id"
8668            }
8669            BufferCollectionTokenRequest::SetWeak { .. } => "set_weak",
8670            BufferCollectionTokenRequest::SetWeakOk { .. } => "set_weak_ok",
8671            BufferCollectionTokenRequest::AttachNodeTracking { .. } => "attach_node_tracking",
8672            BufferCollectionTokenRequest::DuplicateSync { .. } => "duplicate_sync",
8673            BufferCollectionTokenRequest::Duplicate { .. } => "duplicate",
8674            BufferCollectionTokenRequest::SetDispensable { .. } => "set_dispensable",
8675            BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup { .. } => {
8676                "create_buffer_collection_token_group"
8677            }
8678            BufferCollectionTokenRequest::_UnknownMethod {
8679                method_type: fidl::MethodType::OneWay,
8680                ..
8681            } => "unknown one-way method",
8682            BufferCollectionTokenRequest::_UnknownMethod {
8683                method_type: fidl::MethodType::TwoWay,
8684                ..
8685            } => "unknown two-way method",
8686        }
8687    }
8688}
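
// A hedged usage sketch (not generated code): the `into_*` helpers above convert a
// received request into its payload/handle tuple when the caller expects a specific
// method, e.g.:
//
//     if let Some((payload, _control_handle)) = request.into_duplicate() {
//         // `payload` is a BufferCollectionTokenDuplicateRequest; a real server
//         // would create the requested duplicate token here.
//     }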
8689
8690#[derive(Debug, Clone)]
8691pub struct BufferCollectionTokenControlHandle {
8692    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
8693}
8694
8695impl fidl::endpoints::ControlHandle for BufferCollectionTokenControlHandle {
8696    fn shutdown(&self) {
8697        self.inner.shutdown()
8698    }
8699    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
8700        self.inner.shutdown_with_epitaph(status)
8701    }
8702
8703    fn is_closed(&self) -> bool {
8704        self.inner.channel().is_closed()
8705    }
8706    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
8707        self.inner.channel().on_closed()
8708    }
8709
8710    #[cfg(target_os = "fuchsia")]
8711    fn signal_peer(
8712        &self,
8713        clear_mask: zx::Signals,
8714        set_mask: zx::Signals,
8715    ) -> Result<(), zx_status::Status> {
8716        use fidl::Peered;
8717        self.inner.channel().signal_peer(clear_mask, set_mask)
8718    }
8719}
8720
8721impl BufferCollectionTokenControlHandle {}
8722
8723#[must_use = "FIDL methods require a response to be sent"]
8724#[derive(Debug)]
8725pub struct BufferCollectionTokenSyncResponder {
8726    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
8727    tx_id: u32,
8728}
8729
8730/// Sets the channel to shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
8731/// if the responder is dropped without sending a response, so that the client
8732/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
8733impl std::ops::Drop for BufferCollectionTokenSyncResponder {
8734    fn drop(&mut self) {
8735        self.control_handle.shutdown();
8736        // Safety: drops once, never accessed again
8737        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
8738    }
8739}
8740
8741impl fidl::endpoints::Responder for BufferCollectionTokenSyncResponder {
8742    type ControlHandle = BufferCollectionTokenControlHandle;
8743
8744    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
8745        &self.control_handle
8746    }
8747
8748    fn drop_without_shutdown(mut self) {
8749        // Safety: drops once, never accessed again due to mem::forget
8750        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
8751        // Prevent Drop from running (which would shut down the channel)
8752        std::mem::forget(self);
8753    }
8754}
8755
8756impl BufferCollectionTokenSyncResponder {
8757    /// Sends a response to the FIDL transaction.
8758    ///
8759    /// Sets the channel to shutdown if an error occurs.
8760    pub fn send(self) -> Result<(), fidl::Error> {
8761        let _result = self.send_raw();
8762        if _result.is_err() {
8763            self.control_handle.shutdown();
8764        }
8765        self.drop_without_shutdown();
8766        _result
8767    }
8768
8769    /// Similar to "send" but does not shutdown the channel if an error occurs.
8770    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
8771        let _result = self.send_raw();
8772        self.drop_without_shutdown();
8773        _result
8774    }
8775
8776    fn send_raw(&self) -> Result<(), fidl::Error> {
8777        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
8778            fidl::encoding::Flexible::new(()),
8779            self.tx_id,
8780            0x11ac2555cf575b54,
8781            fidl::encoding::DynamicFlags::FLEXIBLE,
8782        )
8783    }
8784}
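
// A hedged usage sketch (not generated code): when a server receives
// `BufferCollectionTokenRequest::Sync { responder }`, it completes the two-way call
// by sending the empty response; dropping the responder without
// `drop_without_shutdown` would instead shut the channel down:
//
//     if let Some(responder) = request.into_sync() {
//         responder.send()?;
//     }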
8785
8786#[must_use = "FIDL methods require a response to be sent"]
8787#[derive(Debug)]
8788pub struct BufferCollectionTokenGetNodeRefResponder {
8789    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
8790    tx_id: u32,
8791}
8792
8793/// Sets the channel to shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
8794/// if the responder is dropped without sending a response, so that the client
8795/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
8796impl std::ops::Drop for BufferCollectionTokenGetNodeRefResponder {
8797    fn drop(&mut self) {
8798        self.control_handle.shutdown();
8799        // Safety: drops once, never accessed again
8800        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
8801    }
8802}
8803
8804impl fidl::endpoints::Responder for BufferCollectionTokenGetNodeRefResponder {
8805    type ControlHandle = BufferCollectionTokenControlHandle;
8806
8807    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
8808        &self.control_handle
8809    }
8810
8811    fn drop_without_shutdown(mut self) {
8812        // Safety: drops once, never accessed again due to mem::forget
8813        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
8814        // Prevent Drop from running (which would shut down the channel)
8815        std::mem::forget(self);
8816    }
8817}
8818
8819impl BufferCollectionTokenGetNodeRefResponder {
8820    /// Sends a response to the FIDL transaction.
8821    ///
8822    /// Sets the channel to shutdown if an error occurs.
8823    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8824        let _result = self.send_raw(payload);
8825        if _result.is_err() {
8826            self.control_handle.shutdown();
8827        }
8828        self.drop_without_shutdown();
8829        _result
8830    }
8831
8832    /// Similar to "send" but does not shutdown the channel if an error occurs.
8833    pub fn send_no_shutdown_on_err(
8834        self,
8835        mut payload: NodeGetNodeRefResponse,
8836    ) -> Result<(), fidl::Error> {
8837        let _result = self.send_raw(payload);
8838        self.drop_without_shutdown();
8839        _result
8840    }
8841
8842    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8843        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
8844            fidl::encoding::Flexible::new(&mut payload),
8845            self.tx_id,
8846            0x5b3d0e51614df053,
8847            fidl::encoding::DynamicFlags::FLEXIBLE,
8848        )
8849    }
8850}
8851
8852#[must_use = "FIDL methods require a response to be sent"]
8853#[derive(Debug)]
8854pub struct BufferCollectionTokenIsAlternateForResponder {
8855    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
8856    tx_id: u32,
8857}
8858
8859/// Sets the channel to shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
8860/// if the responder is dropped without sending a response, so that the client
8861/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
8862impl std::ops::Drop for BufferCollectionTokenIsAlternateForResponder {
8863    fn drop(&mut self) {
8864        self.control_handle.shutdown();
8865        // Safety: drops once, never accessed again
8866        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
8867    }
8868}
8869
8870impl fidl::endpoints::Responder for BufferCollectionTokenIsAlternateForResponder {
8871    type ControlHandle = BufferCollectionTokenControlHandle;
8872
8873    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
8874        &self.control_handle
8875    }
8876
8877    fn drop_without_shutdown(mut self) {
8878        // Safety: drops once, never accessed again due to mem::forget
8879        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
8880        // Prevent Drop from running (which would shut down the channel)
8881        std::mem::forget(self);
8882    }
8883}
8884
8885impl BufferCollectionTokenIsAlternateForResponder {
8886    /// Sends a response to the FIDL transaction.
8887    ///
8888    /// Sets the channel to shutdown if an error occurs.
8889    pub fn send(
8890        self,
8891        mut result: Result<&NodeIsAlternateForResponse, Error>,
8892    ) -> Result<(), fidl::Error> {
8893        let _result = self.send_raw(result);
8894        if _result.is_err() {
8895            self.control_handle.shutdown();
8896        }
8897        self.drop_without_shutdown();
8898        _result
8899    }
8900
8901    /// Similar to "send" but does not shutdown the channel if an error occurs.
8902    pub fn send_no_shutdown_on_err(
8903        self,
8904        mut result: Result<&NodeIsAlternateForResponse, Error>,
8905    ) -> Result<(), fidl::Error> {
8906        let _result = self.send_raw(result);
8907        self.drop_without_shutdown();
8908        _result
8909    }
8910
8911    fn send_raw(
8912        &self,
8913        mut result: Result<&NodeIsAlternateForResponse, Error>,
8914    ) -> Result<(), fidl::Error> {
8915        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
8916            NodeIsAlternateForResponse,
8917            Error,
8918        >>(
8919            fidl::encoding::FlexibleResult::new(result),
8920            self.tx_id,
8921            0x3a58e00157e0825,
8922            fidl::encoding::DynamicFlags::FLEXIBLE,
8923        )
8924    }
8925}
8926
8927#[must_use = "FIDL methods require a response to be sent"]
8928#[derive(Debug)]
8929pub struct BufferCollectionTokenGetBufferCollectionIdResponder {
8930    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
8931    tx_id: u32,
8932}
8933
8934/// Sets the channel to shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
8935/// if the responder is dropped without sending a response, so that the client
8936/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
8937impl std::ops::Drop for BufferCollectionTokenGetBufferCollectionIdResponder {
8938    fn drop(&mut self) {
8939        self.control_handle.shutdown();
8940        // Safety: drops once, never accessed again
8941        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
8942    }
8943}
8944
8945impl fidl::endpoints::Responder for BufferCollectionTokenGetBufferCollectionIdResponder {
8946    type ControlHandle = BufferCollectionTokenControlHandle;
8947
8948    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
8949        &self.control_handle
8950    }
8951
8952    fn drop_without_shutdown(mut self) {
8953        // Safety: drops once, never accessed again due to mem::forget
8954        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
8955        // Prevent Drop from running (which would shut down the channel)
8956        std::mem::forget(self);
8957    }
8958}
8959
8960impl BufferCollectionTokenGetBufferCollectionIdResponder {
8961    /// Sends a response to the FIDL transaction.
8962    ///
8963    /// Sets the channel to shutdown if an error occurs.
8964    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
8965        let _result = self.send_raw(payload);
8966        if _result.is_err() {
8967            self.control_handle.shutdown();
8968        }
8969        self.drop_without_shutdown();
8970        _result
8971    }
8972
8973    /// Similar to "send" but does not shutdown the channel if an error occurs.
8974    pub fn send_no_shutdown_on_err(
8975        self,
8976        mut payload: &NodeGetBufferCollectionIdResponse,
8977    ) -> Result<(), fidl::Error> {
8978        let _result = self.send_raw(payload);
8979        self.drop_without_shutdown();
8980        _result
8981    }
8982
8983    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
8984        self.control_handle
8985            .inner
8986            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
8987                fidl::encoding::Flexible::new(payload),
8988                self.tx_id,
8989                0x77d19a494b78ba8c,
8990                fidl::encoding::DynamicFlags::FLEXIBLE,
8991            )
8992    }
8993}
8994
8995#[must_use = "FIDL methods require a response to be sent"]
8996#[derive(Debug)]
8997pub struct BufferCollectionTokenDuplicateSyncResponder {
8998    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
8999    tx_id: u32,
9000}
9001
9002/// Sets the channel to shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
9003/// if the responder is dropped without sending a response, so that the client
9004/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
9005impl std::ops::Drop for BufferCollectionTokenDuplicateSyncResponder {
9006    fn drop(&mut self) {
9007        self.control_handle.shutdown();
9008        // Safety: drops once, never accessed again
9009        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
9010    }
9011}
9012
9013impl fidl::endpoints::Responder for BufferCollectionTokenDuplicateSyncResponder {
9014    type ControlHandle = BufferCollectionTokenControlHandle;
9015
9016    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
9017        &self.control_handle
9018    }
9019
9020    fn drop_without_shutdown(mut self) {
9021        // Safety: drops once, never accessed again due to mem::forget
9022        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
9023        // Prevent Drop from running (which would shut down the channel)
9024        std::mem::forget(self);
9025    }
9026}
9027
9028impl BufferCollectionTokenDuplicateSyncResponder {
9029    /// Sends a response to the FIDL transaction.
9030    ///
9031    /// Sets the channel to shutdown if an error occurs.
9032    pub fn send(
9033        self,
9034        mut payload: BufferCollectionTokenDuplicateSyncResponse,
9035    ) -> Result<(), fidl::Error> {
9036        let _result = self.send_raw(payload);
9037        if _result.is_err() {
9038            self.control_handle.shutdown();
9039        }
9040        self.drop_without_shutdown();
9041        _result
9042    }
9043
9044    /// Similar to "send" but does not shutdown the channel if an error occurs.
9045    pub fn send_no_shutdown_on_err(
9046        self,
9047        mut payload: BufferCollectionTokenDuplicateSyncResponse,
9048    ) -> Result<(), fidl::Error> {
9049        let _result = self.send_raw(payload);
9050        self.drop_without_shutdown();
9051        _result
9052    }
9053
9054    fn send_raw(
9055        &self,
9056        mut payload: BufferCollectionTokenDuplicateSyncResponse,
9057    ) -> Result<(), fidl::Error> {
9058        self.control_handle.inner.send::<fidl::encoding::FlexibleType<
9059            BufferCollectionTokenDuplicateSyncResponse,
9060        >>(
9061            fidl::encoding::Flexible::new(&mut payload),
9062            self.tx_id,
9063            0x1c1af9919d1ca45c,
9064            fidl::encoding::DynamicFlags::FLEXIBLE,
9065        )
9066    }
9067}
9068
9069#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
9070pub struct BufferCollectionTokenGroupMarker;
9071
9072impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenGroupMarker {
9073    type Proxy = BufferCollectionTokenGroupProxy;
9074    type RequestStream = BufferCollectionTokenGroupRequestStream;
9075    #[cfg(target_os = "fuchsia")]
9076    type SynchronousProxy = BufferCollectionTokenGroupSynchronousProxy;
9077
9078    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionTokenGroup";
9079}
9080
9081pub trait BufferCollectionTokenGroupProxyInterface: Send + Sync {
9082    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
9083    fn r#sync(&self) -> Self::SyncResponseFut;
9084    fn r#release(&self) -> Result<(), fidl::Error>;
9085    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
9086    fn r#set_debug_client_info(
9087        &self,
9088        payload: &NodeSetDebugClientInfoRequest,
9089    ) -> Result<(), fidl::Error>;
9090    fn r#set_debug_timeout_log_deadline(
9091        &self,
9092        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
9093    ) -> Result<(), fidl::Error>;
9094    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
9095    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
9096        + Send;
9097    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
9098    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
9099        + Send;
9100    fn r#is_alternate_for(
9101        &self,
9102        payload: NodeIsAlternateForRequest,
9103    ) -> Self::IsAlternateForResponseFut;
9104    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
9105        + Send;
9106    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
9107    fn r#set_weak(&self) -> Result<(), fidl::Error>;
9108    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
9109    fn r#attach_node_tracking(
9110        &self,
9111        payload: NodeAttachNodeTrackingRequest,
9112    ) -> Result<(), fidl::Error>;
9113    fn r#create_child(
9114        &self,
9115        payload: BufferCollectionTokenGroupCreateChildRequest,
9116    ) -> Result<(), fidl::Error>;
9117    type CreateChildrenSyncResponseFut: std::future::Future<
9118            Output = Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error>,
9119        > + Send;
9120    fn r#create_children_sync(
9121        &self,
9122        payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
9123    ) -> Self::CreateChildrenSyncResponseFut;
9124    fn r#all_children_present(&self) -> Result<(), fidl::Error>;
9125}
9126#[derive(Debug)]
9127#[cfg(target_os = "fuchsia")]
9128pub struct BufferCollectionTokenGroupSynchronousProxy {
9129    client: fidl::client::sync::Client,
9130}
9131
9132#[cfg(target_os = "fuchsia")]
9133impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenGroupSynchronousProxy {
9134    type Proxy = BufferCollectionTokenGroupProxy;
9135    type Protocol = BufferCollectionTokenGroupMarker;
9136
9137    fn from_channel(inner: fidl::Channel) -> Self {
9138        Self::new(inner)
9139    }
9140
9141    fn into_channel(self) -> fidl::Channel {
9142        self.client.into_channel()
9143    }
9144
9145    fn as_channel(&self) -> &fidl::Channel {
9146        self.client.as_channel()
9147    }
9148}
9149
9150#[cfg(target_os = "fuchsia")]
9151impl BufferCollectionTokenGroupSynchronousProxy {
9152    pub fn new(channel: fidl::Channel) -> Self {
9153        let protocol_name =
9154            <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
9155        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
9156    }
9157
9158    pub fn into_channel(self) -> fidl::Channel {
9159        self.client.into_channel()
9160    }
9161
9162    /// Waits until an event arrives and returns it. It is safe for other
9163    /// threads to make concurrent requests while waiting for an event.
9164    pub fn wait_for_event(
9165        &self,
9166        deadline: zx::MonotonicInstant,
9167    ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
9168        BufferCollectionTokenGroupEvent::decode(self.client.wait_for_event(deadline)?)
9169    }
9170
9171    /// Ensure that previous messages have been received server side. This is
9172    /// particularly useful after previous messages that created new tokens,
9173    /// because a token must be known to the sysmem server before sending the
9174    /// token to another participant.
9175    ///
9176    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9177    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9178    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9179    /// to mitigate the possibility of a hostile/fake
9180    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9181    /// Another way is to pass the token to
9182    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
9183    /// the token as part of exchanging it for a
9184    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9185    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9186    /// of stalling.
9187    ///
9188    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9189    /// and then starting and completing a `Sync`, it's then safe to send the
9190    /// `BufferCollectionToken` client ends to other participants knowing the
9191    /// server will recognize the tokens when they're sent by the other
9192    /// participants to sysmem in a
9193    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
9194    /// efficient way to create tokens while avoiding unnecessary round trips.
9195    ///
9196    /// Other options include waiting for each
9197    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
9198    /// individually (using a separate call to `Sync` after each), or calling
9199    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
9200    /// converted to a `BufferCollection` via
9201    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
9202    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
9203    /// the sync step and can create multiple tokens at once.
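    ///
    /// A minimal usage sketch (illustrative only), assuming a connected
    /// synchronous proxy named `group` and using `zx::MonotonicInstant::INFINITE`
    /// as the no-deadline sentinel:
    ///
    /// ```ignore
    /// // Block until sysmem has processed all previously sent messages.
    /// group.r#sync(zx::MonotonicInstant::INFINITE)?;
    /// ```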
9204    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
9205        let _response = self.client.send_query::<
9206            fidl::encoding::EmptyPayload,
9207            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
9208        >(
9209            (),
9210            0x11ac2555cf575b54,
9211            fidl::encoding::DynamicFlags::FLEXIBLE,
9212            ___deadline,
9213        )?
9214        .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
9215        Ok(_response)
9216    }
9217
9218    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
9219    ///
9220    /// Normally a participant will convert a `BufferCollectionToken` into a
9221    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
9222    /// `Release` via the token (and then close the channel immediately or
9223    /// shortly later in response to server closing the server end), which
9224    /// avoids causing buffer collection failure. Without a prior `Release`,
9225    /// closing the `BufferCollectionToken` client end will cause buffer
9226    /// collection failure.
9227    ///
9228    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
9229    ///
9230    /// By default the server handles unexpected closure of a
9231    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
9232    /// first) by failing the buffer collection. Partly this is to expedite
9233    /// closing VMO handles to reclaim memory when any participant fails. If a
9234    /// participant would like to cleanly close a `BufferCollection` without
9235    /// causing buffer collection failure, the participant can send `Release`
9236    /// before closing the `BufferCollection` client end. The `Release` can
9237    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
9238    /// buffer collection won't require constraints from this node in order to
9239    /// allocate. If after `SetConstraints`, the constraints are retained and
9240    /// aggregated, despite the lack of `BufferCollection` connection at the
9241    /// time of constraints aggregation.
9242    ///
9243    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
9244    ///
9245    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
9246    /// end (without `Release` first) will trigger failure of the buffer
9247    /// collection. To close a `BufferCollectionTokenGroup` channel without
9248    /// failing the buffer collection, ensure that AllChildrenPresent() has been
9249    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
9250    /// client end.
9251    ///
9252    /// If `Release` occurs before
9253    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
9254    /// buffer collection will fail (triggered by reception of `Release` without
9255    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
9256    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
9257    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
9258    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
9259    /// close requires `AllChildrenPresent` (if not already sent), then
9260    /// `Release`, then close client end.
9261    ///
9262    /// If `Release` occurs after `AllChildrenPresent`, the children and all
9263    /// their constraints remain intact (just as they would if the
9264    /// `BufferCollectionTokenGroup` channel had remained open), and the client
9265    /// end close doesn't trigger buffer collection failure.
9266    ///
9267    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
9268    ///
9269    /// For brevity, the per-channel-protocol paragraphs above ignore the
9270    /// separate failure domain created by
9271    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
9272    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
9273    /// unexpectedly closes (without `Release` first) and that client end is
9274    /// under a failure domain, instead of failing the whole buffer collection,
9275    /// the failure domain is failed, but the buffer collection itself is
9276    /// isolated from failure of the failure domain. Such failure domains can be
9277    /// nested, in which case only the inner-most failure domain in which the
9278    /// `Node` resides fails.
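    ///
    /// A minimal clean-close sketch for a `BufferCollectionTokenGroup`
    /// (illustrative only), assuming a connected synchronous proxy named
    /// `group`, following the ordering described above:
    ///
    /// ```ignore
    /// // AllChildrenPresent (if not already sent), then Release, then close.
    /// group.r#all_children_present()?;
    /// group.r#release()?;
    /// drop(group);
    /// ```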
9279    pub fn r#release(&self) -> Result<(), fidl::Error> {
9280        self.client.send::<fidl::encoding::EmptyPayload>(
9281            (),
9282            0x6a5cae7d6d6e04c6,
9283            fidl::encoding::DynamicFlags::FLEXIBLE,
9284        )
9285    }
9286
9287    /// Set a name for VMOs in this buffer collection.
9288    ///
9289    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
9290    /// will be truncated to fit. The name of the vmo will be suffixed with the
9291    /// buffer index within the collection (if the suffix fits within
9292    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
9293    /// listed in the inspect data.
9294    ///
9295    /// The name only affects VMOs allocated after the name is set; this call
9296    /// does not rename existing VMOs. If multiple clients set different names,
9297    /// the name set with the larger priority value wins. Setting a new name with the
9298    /// same priority as a prior name doesn't change the name.
9299    ///
9300    /// All table fields are currently required.
9301    ///
9302    /// + request `priority` The name is only set if this is the first `SetName`
9303    ///   or if `priority` is greater than any previous `priority` value in
9304    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
9305    /// + request `name` The name for VMOs created under this buffer collection.
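    ///
    /// A minimal usage sketch (illustrative only), assuming a connected proxy
    /// named `group`; the table fields shown are the `priority` and `name`
    /// described above:
    ///
    /// ```ignore
    /// group.r#set_name(&NodeSetNameRequest {
    ///     priority: Some(1),
    ///     name: Some("my-participant-buffers".to_string()),
    ///     ..Default::default()
    /// })?;
    /// ```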
9306    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
9307        self.client.send::<NodeSetNameRequest>(
9308            payload,
9309            0xb41f1624f48c1e9,
9310            fidl::encoding::DynamicFlags::FLEXIBLE,
9311        )
9312    }
9313
9314    /// Set information about the current client that can be used by sysmem to
9315    /// help diagnose leaking memory and allocation stalls waiting for a
9316    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
9317    ///
9318    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
9319    /// `Node`(s) derived from this `Node`, unless overridden by
9320    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
9321    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
9322    ///
9323    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
9324    /// `Allocator` is the most efficient way to ensure that all
9325    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
9326    /// set, and is also more efficient than separately sending the same debug
9327    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
9328    /// created [`fuchsia.sysmem2/Node`].
9329    ///
9330    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
9331    /// indicate which client is closing their channel first, leading to subtree
9332    /// failure (which can be normal if the purpose of the subtree is over, but
9333    /// if happening earlier than expected, the client-channel-specific name can
9334    /// help diagnose where the failure is first coming from, from sysmem's
9335    /// point of view).
9336    ///
9337    /// All table fields are currently required.
9338    ///
9339    /// + request `name` This can be an arbitrary string, but the current
9340    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
9341    /// + request `id` This can be an arbitrary id, but the current process ID
9342    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
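    ///
    /// A minimal usage sketch (illustrative only), assuming a connected proxy
    /// named `group`; the table fields shown are the `name` and `id` described
    /// above:
    ///
    /// ```ignore
    /// group.r#set_debug_client_info(&NodeSetDebugClientInfoRequest {
    ///     name: Some("my-component".to_string()),
    ///     // Placeholder value; the current process koid is a good default.
    ///     id: Some(0),
    ///     ..Default::default()
    /// })?;
    /// ```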
9343    pub fn r#set_debug_client_info(
9344        &self,
9345        mut payload: &NodeSetDebugClientInfoRequest,
9346    ) -> Result<(), fidl::Error> {
9347        self.client.send::<NodeSetDebugClientInfoRequest>(
9348            payload,
9349            0x5cde8914608d99b1,
9350            fidl::encoding::DynamicFlags::FLEXIBLE,
9351        )
9352    }
9353
9354    /// Sysmem logs a warning if sysmem hasn't seen
9355    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
9356    /// within 5 seconds after creation of a new collection.
9357    ///
9358    /// Clients can call this method to change when the log is printed. If
9359    /// multiple clients set the deadline, it's unspecified which deadline will
9360    /// take effect.
9361    ///
9362    /// In most cases the default works well.
9363    ///
9364    /// All table fields are currently required.
9365    ///
9366    /// + request `deadline` The time at which sysmem will start trying to log
9367    ///   the warning, unless all constraints are with sysmem by then.
9368    pub fn r#set_debug_timeout_log_deadline(
9369        &self,
9370        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
9371    ) -> Result<(), fidl::Error> {
9372        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
9373            payload,
9374            0x716b0af13d5c0806,
9375            fidl::encoding::DynamicFlags::FLEXIBLE,
9376        )
9377    }
9378
9379    /// This enables verbose logging for the buffer collection.
9380    ///
9381    /// Verbose logging includes constraints set via
9382    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
9383    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
9384    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
9385    /// the tree of `Node`(s).
9386    ///
9387    /// Normally sysmem prints only a single line complaint when aggregation
9388    /// fails, with just the specific detailed reason that aggregation failed,
9389    /// with little surrounding context.  While this is often enough to diagnose
9390    /// a problem if only a small change was made and everything was working
9391    /// before the small change, it's often not particularly helpful for getting
9392    /// a new buffer collection to work for the first time.  Especially with
9393    /// more complex trees of nodes, involving things like
9394    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
9395    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
9396    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
9397    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
9398    /// looks like and why it's failing a logical allocation, or why a tree or
9399    /// subtree is failing sooner than expected.
9400    ///
9401    /// The intent of the extra logging is to be acceptable from a performance
9402    /// point of view, under the assumption that verbose logging is only enabled
9403    /// on a low number of buffer collections. If we're not tracking down a bug,
9404    /// we shouldn't send this message.
9405    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
9406        self.client.send::<fidl::encoding::EmptyPayload>(
9407            (),
9408            0x5209c77415b4dfad,
9409            fidl::encoding::DynamicFlags::FLEXIBLE,
9410        )
9411    }
9412
9413    /// This gets a handle that can be used as a parameter to
9414    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
9415    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
9416    /// client obtained this handle from this `Node`.
9417    ///
9418    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
9419    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
9420    /// despite the two calls typically being on different channels.
9421    ///
9422    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
9423    ///
9424    /// All table fields are currently required.
9425    ///
9426    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
9427    ///   different `Node` channel, to prove that the client obtained the handle
9428    ///   from this `Node`.
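    ///
    /// A minimal usage sketch (illustrative only), assuming a connected
    /// synchronous proxy named `group`:
    ///
    /// ```ignore
    /// let response = group.r#get_node_ref(zx::MonotonicInstant::INFINITE)?;
    /// // `node_ref` is the event handle described above, usable later as proof
    /// // in an IsAlternateFor call on a different Node channel.
    /// let _node_ref = response.node_ref;
    /// ```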
9429    pub fn r#get_node_ref(
9430        &self,
9431        ___deadline: zx::MonotonicInstant,
9432    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
9433        let _response = self.client.send_query::<
9434            fidl::encoding::EmptyPayload,
9435            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
9436        >(
9437            (),
9438            0x5b3d0e51614df053,
9439            fidl::encoding::DynamicFlags::FLEXIBLE,
9440            ___deadline,
9441        )?
9442        .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
9443        Ok(_response)
9444    }
9445
9446    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
9447    /// rooted at a different child token of a common parent
9448    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
9449    /// passed-in `node_ref`.
9450    ///
9451    /// This call is for assisting with admission control de-duplication, and
9452    /// with debugging.
9453    ///
9454    /// The `node_ref` must be obtained using
9455    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
9456    ///
9457    /// The `node_ref` can be a duplicated handle; it's not necessary to call
9458    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
9459    ///
9460    /// If a calling token may not actually be a valid token at all due to a
9461    /// potentially hostile/untrusted provider of the token, call
9462    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
9463    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
9464    /// never responds due to a calling token not being a real token (not really
9465    /// talking to sysmem).  Another option is to call
9466    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
9467    /// which also validates the token along with converting it to a
9468    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
9469    ///
9470    /// All table fields are currently required.
9471    ///
9472    /// - response `is_alternate`
9473    ///   - true: The first parent node in common between the calling node and
9474    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
9475    ///     that the calling `Node` and the `node_ref` `Node` will not have both
9476    ///     their constraints apply - rather sysmem will choose one or the other
9477    ///     of the constraints - never both.  This is because only one child of
9478    ///     a `BufferCollectionTokenGroup` is selected during logical
9479    ///     allocation, with only that one child's subtree contributing to
9480    ///     constraints aggregation.
9481    ///   - false: The first parent node in common between the calling `Node`
9482    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
9483    ///     Currently, this means the first parent node in common is a
9484    ///     `BufferCollectionToken` or `BufferCollection` (regardless of
9485    ///     whether `Release` has been sent).  This means that the calling `Node` and the `node_ref`
9486    ///     `Node` may have both their constraints apply during constraints
9487    ///     aggregation of the logical allocation, if both `Node`(s) are
9488    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
9489    ///     this case, there is no `BufferCollectionTokenGroup` that will
9490    ///     directly prevent the two `Node`(s) from both being selected and
9491    ///     their constraints both aggregated, but even when false, one or both
9492    ///     `Node`(s) may still be eliminated from consideration if one or both
9493    ///     `Node`(s) has a direct or indirect parent
9494    ///     `BufferCollectionTokenGroup` which selects a child subtree other
9495    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
9496    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
9497    ///   associated with the same buffer collection as the calling `Node`.
9498    ///   Another reason for this error is if the `node_ref` is an
9499    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
9500    ///   a real `node_ref` obtained from `GetNodeRef`.
9501    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
9502    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
9503    ///   the needed rights expected on a real `node_ref`.
9504    /// * No other failing status codes are returned by this call.  However,
9505    ///   sysmem may add additional codes in future, so the client should have
9506    ///   sensible default handling for any failing status code.
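    ///
    /// A minimal usage sketch (illustrative only), assuming `node_ref` is an
    /// event previously obtained via `GetNodeRef` on some other `Node` and
    /// `group` is a connected synchronous proxy:
    ///
    /// ```ignore
    /// let result = group.r#is_alternate_for(
    ///     NodeIsAlternateForRequest { node_ref: Some(node_ref), ..Default::default() },
    ///     zx::MonotonicInstant::INFINITE,
    /// )?;
    /// match result {
    ///     Ok(response) => {
    ///         // `is_alternate` reflects whether the closest common parent is a
    ///         // BufferCollectionTokenGroup, as described above.
    ///         let _is_alternate = response.is_alternate;
    ///     }
    ///     Err(e) => eprintln!("IsAlternateFor failed: {:?}", e),
    /// }
    /// ```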
9507    pub fn r#is_alternate_for(
9508        &self,
9509        mut payload: NodeIsAlternateForRequest,
9510        ___deadline: zx::MonotonicInstant,
9511    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
9512        let _response = self.client.send_query::<
9513            NodeIsAlternateForRequest,
9514            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
9515        >(
9516            &mut payload,
9517            0x3a58e00157e0825,
9518            fidl::encoding::DynamicFlags::FLEXIBLE,
9519            ___deadline,
9520        )?
9521        .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
9522        Ok(_response.map(|x| x))
9523    }
9524
9525    /// Get the buffer collection ID. This ID is also available from
9526    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
9527    /// within the collection).
9528    ///
9529    /// This call is mainly useful in situations where we can't convey a
9530    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
9531    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
9532    /// handle, which can be joined back up with a `BufferCollection` client end
9533    /// that was created via a different path. Prefer to convey a
9534    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
9535    ///
9536    /// Trusting a `buffer_collection_id` value from a source other than sysmem
9537    /// is analogous to trusting a koid value from a source other than zircon.
9538    /// Both should be avoided unless really necessary, and both require
9539    /// caution. In some situations it may be reasonable to refer to a
9540    /// pre-established `BufferCollection` by `buffer_collection_id` via a
9541    /// protocol for efficiency reasons, but an incoming value purporting to be
9542    /// a `buffer_collection_id` is not sufficient alone to justify granting the
9543    /// sender of the `buffer_collection_id` any capability. The sender must
9544    /// first prove to a receiver that the sender has/had a VMO or has/had a
9545    /// `BufferCollectionToken` to the same collection by sending a handle that
9546    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
9547    /// `buffer_collection_id` value. The receiver should take care to avoid
9548    /// assuming that a sender had a `BufferCollectionToken` in cases where the
9549    /// sender has only proven that the sender had a VMO.
9550    ///
9551    /// - response `buffer_collection_id` This ID is unique per buffer
9552    ///   collection per boot. Each buffer is uniquely identified by the
9553    ///   `buffer_collection_id` and `buffer_index` together.
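    ///
    /// A minimal usage sketch (illustrative only), assuming a connected
    /// synchronous proxy named `group`:
    ///
    /// ```ignore
    /// let response = group.r#get_buffer_collection_id(zx::MonotonicInstant::INFINITE)?;
    /// // Unique per buffer collection per boot, as described above.
    /// let _id = response.buffer_collection_id;
    /// ```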
9554    pub fn r#get_buffer_collection_id(
9555        &self,
9556        ___deadline: zx::MonotonicInstant,
9557    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
9558        let _response = self.client.send_query::<
9559            fidl::encoding::EmptyPayload,
9560            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
9561        >(
9562            (),
9563            0x77d19a494b78ba8c,
9564            fidl::encoding::DynamicFlags::FLEXIBLE,
9565            ___deadline,
9566        )?
9567        .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
9568        Ok(_response)
9569    }
9570
9571    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
9572    /// created after this message to weak, which means that a client's `Node`
9573    /// client end (or a child created after this message) is not alone
9574    /// sufficient to keep allocated VMOs alive.
9575    ///
9576    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
9577    /// `close_weak_asap`.
9578    ///
9579    /// This message is only permitted before the `Node` becomes ready for
9580    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
9581    ///   * `BufferCollectionToken`: any time
9582    ///   * `BufferCollection`: before `SetConstraints`
9583    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
9584    ///
9585    /// Currently, no conversion from strong `Node` to weak `Node` after ready
9586    /// for allocation is provided, but a client can simulate that by creating
9587    /// an additional `Node` before allocation and setting that additional
9588    /// `Node` to weak, and then potentially at some point later sending
9589    /// `Release` and closing the client end of the client's strong `Node`, but
9590    /// keeping the client's weak `Node`.
9591    ///
9592    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
9593    /// collection failure (all `Node` client end(s) will see
9594    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
9595    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
9596    /// this situation until all `Node`(s) are ready for allocation. For initial
9597    /// allocation to succeed, at least one strong `Node` is required to exist
9598    /// at allocation time; after a client has received VMO handles, that
9599    /// client can `BufferCollection.Release` and close the client end without
9600    /// causing this type of failure.
9601    ///
9602    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
9603    /// imply `SetWeakOk` with `for_child_nodes_also` true, which can be sent
9604    /// separately as appropriate.
9605    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
9606        self.client.send::<fidl::encoding::EmptyPayload>(
9607            (),
9608            0x22dd3ea514eeffe1,
9609            fidl::encoding::DynamicFlags::FLEXIBLE,
9610        )
9611    }
9612
9613    /// This indicates to sysmem that the client is prepared to pay attention to
9614    /// `close_weak_asap`.
9615    ///
9616    /// If sent, this message must be before
9617    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
9618    ///
9619    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
9620    /// send this message before `WaitForAllBuffersAllocated`, or a parent
9621    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
9622    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
9623    /// trigger buffer collection failure.
9624    ///
9625    /// This message is necessary because weak sysmem VMOs have not always been
9626    /// a thing, so older clients are not aware of the need to pay attention to
9627    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
9628    /// sysmem weak VMO handles asap. By having this message and requiring
9629    /// participants to indicate their acceptance of this aspect of the overall
9630    /// protocol, we avoid situations where an older client is delivered a weak
9631    /// VMO without any way for sysmem to get that VMO to close quickly later
9632    /// (and on a per-buffer basis).
9633    ///
9634    /// A participant that doesn't handle `close_weak_asap` and also doesn't
9635    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
9636    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
9637    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
9638    /// same participant has a child/delegate which does retrieve VMOs, that
9639    /// child/delegate will need to send `SetWeakOk` before
9640    /// `WaitForAllBuffersAllocated`.
9641    ///
9642    /// + request `for_child_nodes_also` If present and true, this means direct
9643    ///   child nodes of this node created after this message plus all
9644    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
9645    ///   those nodes. Any child node of this node that was created before this
9646    ///   message is not included. This setting is "sticky" in the sense that a
9647    ///   subsequent `SetWeakOk` without this bool set to true does not reset
9648    ///   the server-side bool. If this creates a problem for a participant, a
9649    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
9650    ///   tokens instead, as appropriate. A participant should only set
9651    ///   `for_child_nodes_also` true if the participant can really promise to
9652    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
9653    ///   weak VMO handles held by participants holding the corresponding child
9654    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
9655    ///   which are using sysmem(1) can be weak, despite the clients of those
9656    ///   sysmem(1) `Node`(s) not having any direct way to `SetWeakOk` or any
9657    ///   direct way to find out about `close_weak_asap`. This only applies to
9658    ///   descendants of this `Node` which are using sysmem(1), not to this
9659    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
9660    ///   token, which will fail allocation unless an ancestor of this `Node`
9661    ///   specified `for_child_nodes_also` true.
9662    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
9663        self.client.send::<NodeSetWeakOkRequest>(
9664            &mut payload,
9665            0x38a44fc4d7724be9,
9666            fidl::encoding::DynamicFlags::FLEXIBLE,
9667        )
9668    }
9669
9670    /// The server_end will be closed after this `Node` and any child nodes
9671    /// have released their buffer counts, making those counts available for
9672    /// reservation by a different `Node` via
9673    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
9674    ///
9675    /// The `Node` buffer counts may not be released until the entire tree of
9676    /// `Node`(s) is closed or failed, because
9677    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
9678    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
9679    /// `Node` buffer counts remain reserved until the orphaned node is later
9680    /// cleaned up.
9681    ///
9682    /// If the `Node` exceeds a fairly large number of attached eventpair server
9683    /// ends, a log message will indicate this and the `Node` (and the
9684    /// appropriate sub-tree) will fail.
9685    ///
9686    /// The `server_end` will remain open when
9687    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
9688    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
9689    /// [`fuchsia.sysmem2/BufferCollection`].
9690    ///
9691    /// This message can also be used with a
9692    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
9693    pub fn r#attach_node_tracking(
9694        &self,
9695        mut payload: NodeAttachNodeTrackingRequest,
9696    ) -> Result<(), fidl::Error> {
9697        self.client.send::<NodeAttachNodeTrackingRequest>(
9698            &mut payload,
9699            0x3f22f2a293d3cdac,
9700            fidl::encoding::DynamicFlags::FLEXIBLE,
9701        )
9702    }
9703
9704    /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
9705    /// (including its children) will be selected during allocation (or logical
9706    /// allocation).
9707    ///
9708    /// Before passing the client end of this token to
9709    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
9710    /// [`fuchsia.sysmem2/Node.Sync`] after
9711    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
9712    /// Or the client can use
9713    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
9714    /// essentially includes the `Sync`.
9715    ///
9716    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9717    /// fail the group's subtree and close the connection.
9718    ///
9719    /// After all children have been created, send AllChildrenPresent.
9720    ///
9721    /// + request `token_request` The server end of the new token channel.
9722    /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
9723    ///   token allows the holder to get the same rights to buffers as the
9724    ///   parent token (of the group) had. When the value isn't
9725    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
9726    ///   bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
9727    ///   for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
9728    ///   causes subtree failure.
9729    pub fn r#create_child(
9730        &self,
9731        mut payload: BufferCollectionTokenGroupCreateChildRequest,
9732    ) -> Result<(), fidl::Error> {
9733        self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
9734            &mut payload,
9735            0x41a0075d419f30c5,
9736            fidl::encoding::DynamicFlags::FLEXIBLE,
9737        )
9738    }
9739
9740    /// Create 1 or more child tokens at once, synchronously.  In contrast to
9741    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
9742    /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
9743    /// of a returned token to
9744    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
9745    ///
9746    /// The lower-index child tokens are higher priority (attempted sooner) than
9747    /// higher-index child tokens.
9748    ///
9749    /// As per all child tokens, successful aggregation will choose exactly one
9750    /// child among all created children (across all children created across
9751    /// potentially multiple calls to
9752    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
9753    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
9754    ///
9755    /// The maximum permissible total number of children per group, and total
9756    /// number of nodes in an overall tree (from the root) are capped to limits
9757    /// which are not configurable via these protocols.
9758    ///
9759    /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
9760    /// this will fail the group's subtree and close the connection.
9761    ///
9762    /// After all children have been created, send AllChildrenPresent.
9763    ///
9764    /// + request `rights_attenuation_masks` The size of the
9765    ///   `rights_attenuation_masks` determines the number of created child
9766    ///   tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
9767    ///   The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
9768    ///   other value, each 0 bit in the mask attenuates that right.
9769    /// - response `tokens` The created child tokens.
9770    pub fn r#create_children_sync(
9771        &self,
9772        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
9773        ___deadline: zx::MonotonicInstant,
9774    ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
9775        let _response = self.client.send_query::<
9776            BufferCollectionTokenGroupCreateChildrenSyncRequest,
9777            fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
9778        >(
9779            payload,
9780            0x15dea448c536070a,
9781            fidl::encoding::DynamicFlags::FLEXIBLE,
9782            ___deadline,
9783        )?
9784        .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
9785        Ok(_response)
9786    }
9787
9788    /// Indicate that no more children will be created.
9789    ///
9790    /// After creating all children, the client should send
9791    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
9792    /// inform sysmem that no more children will be created, so that sysmem can
9793    /// know when it's ok to start aggregating constraints.
9794    ///
9795    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9796    /// fail the group's subtree and close the connection.
9797    ///
9798    /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
9799    /// after `AllChildrenPresent`, else failure of the group's subtree will be
9800    /// triggered. This is intentionally not analogous to how `Release` without
9801    /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
9802    /// subtree failure.
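    ///
    /// A minimal sketch of the clean-close sequence on this synchronous proxy,
    /// assuming the fidlgen-conventional type name
    /// `BufferCollectionTokenGroupSynchronousProxy` and the generated `release`
    /// method (documented on the async proxy further below):
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::BufferCollectionTokenGroupSynchronousProxy;
    ///
    /// fn close_group_cleanly(
    ///     group: &BufferCollectionTokenGroupSynchronousProxy,
    /// ) -> Result<(), fidl::Error> {
    ///     // All children were created earlier via CreateChild/CreateChildrenSync.
    ///     group.all_children_present()?;
    ///     // Sending Release before dropping the client end avoids failing the
    ///     // buffer collection.
    ///     group.release()?;
    ///     Ok(())
    /// }
    /// ```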
9803    pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
9804        self.client.send::<fidl::encoding::EmptyPayload>(
9805            (),
9806            0x5c327e4a23391312,
9807            fidl::encoding::DynamicFlags::FLEXIBLE,
9808        )
9809    }
9810}
9811
9812#[derive(Debug, Clone)]
9813pub struct BufferCollectionTokenGroupProxy {
9814    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
9815}
9816
9817impl fidl::endpoints::Proxy for BufferCollectionTokenGroupProxy {
9818    type Protocol = BufferCollectionTokenGroupMarker;
9819
9820    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
9821        Self::new(inner)
9822    }
9823
9824    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
9825        self.client.into_channel().map_err(|client| Self { client })
9826    }
9827
9828    fn as_channel(&self) -> &::fidl::AsyncChannel {
9829        self.client.as_channel()
9830    }
9831}
9832
9833impl BufferCollectionTokenGroupProxy {
9834    /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionTokenGroup.
9835    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
9836        let protocol_name =
9837            <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
9838        Self { client: fidl::client::Client::new(channel, protocol_name) }
9839    }
9840
9841    /// Get a Stream of events from the remote end of the protocol.
9842    ///
9843    /// # Panics
9844    ///
9845    /// Panics if the event stream was already taken.
9846    pub fn take_event_stream(&self) -> BufferCollectionTokenGroupEventStream {
9847        BufferCollectionTokenGroupEventStream { event_receiver: self.client.take_event_receiver() }
9848    }
9849
9850    /// Ensure that previous messages have been received server side. This is
9851    /// particularly useful after previous messages that created new tokens,
9852    /// because a token must be known to the sysmem server before sending the
9853    /// token to another participant.
9854    ///
9855    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9856    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9857    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9858    /// to mitigate the possibility of a hostile/fake
9859    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9860    /// Another way is to pass the token to
9861    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
9862    /// the token as part of exchanging it for a
9863    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9864    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9865    /// of stalling.
9866    ///
9867    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9868    /// and then starting and completing a `Sync`, it's then safe to send the
9869    /// `BufferCollectionToken` client ends to other participants knowing the
9870    /// server will recognize the tokens when they're sent by the other
9871    /// participants to sysmem in a
9872    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
9873    /// efficient way to create tokens while avoiding unnecessary round trips.
9874    ///
9875    /// Other options include waiting for each
9876    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
9877    /// individually (using a separate call to `Sync` after each), or calling
9878    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
9879    /// converted to a `BufferCollection` via
9880    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
9881    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
9882    /// the sync step and can create multiple tokens at once.
9883    pub fn r#sync(
9884        &self,
9885    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
9886        BufferCollectionTokenGroupProxyInterface::r#sync(self)
9887    }
9888
9889    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
9890    ///
9891    /// Normally a participant will convert a `BufferCollectionToken` into a
9892    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
9893    /// `Release` via the token (and then close the channel immediately or
9894    /// shortly later in response to server closing the server end), which
9895    /// avoids causing buffer collection failure. Without a prior `Release`,
9896    /// closing the `BufferCollectionToken` client end will cause buffer
9897    /// collection failure.
9898    ///
9899    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
9900    ///
9901    /// By default the server handles unexpected closure of a
9902    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
9903    /// first) by failing the buffer collection. Partly this is to expedite
9904    /// closing VMO handles to reclaim memory when any participant fails. If a
9905    /// participant would like to cleanly close a `BufferCollection` without
9906    /// causing buffer collection failure, the participant can send `Release`
9907    /// before closing the `BufferCollection` client end. The `Release` can
9908    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
9909    /// buffer collection won't require constraints from this node in order to
9910    /// allocate. If after `SetConstraints`, the constraints are retained and
9911    /// aggregated, despite the lack of `BufferCollection` connection at the
9912    /// time of constraints aggregation.
9913    ///
9914    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
9915    ///
9916    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
9917    /// end (without `Release` first) will trigger failure of the buffer
9918    /// collection. To close a `BufferCollectionTokenGroup` channel without
9919    /// failing the buffer collection, ensure that AllChildrenPresent() has been
9920    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
9921    /// client end.
9922    ///
9923    /// If `Release` occurs before
9924    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
9925    /// buffer collection will fail (triggered by reception of `Release` without
9926    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
9927    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
9928    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
9929    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
9930    /// close requires `AllChildrenPresent` (if not already sent), then
9931    /// `Release`, then close client end.
9932    ///
9933    /// If `Release` occurs after `AllChildrenPresent`, the children and all
9934    /// their constraints remain intact (just as they would if the
9935    /// `BufferCollectionTokenGroup` channel had remained open), and the client
9936    /// end close doesn't trigger buffer collection failure.
9937    ///
9938    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
9939    ///
9940    /// For brevity, the per-channel-protocol paragraphs above ignore the
9941    /// separate failure domain created by
9942    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
9943    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
9944    /// unexpectedly closes (without `Release` first) and that client end is
9945    /// under a failure domain, instead of failing the whole buffer collection,
9946    /// the failure domain is failed, but the buffer collection itself is
9947    /// isolated from failure of the failure domain. Such failure domains can be
9948    /// nested, in which case only the inner-most failure domain in which the
9949    /// `Node` resides fails.
9950    pub fn r#release(&self) -> Result<(), fidl::Error> {
9951        BufferCollectionTokenGroupProxyInterface::r#release(self)
9952    }
9953
9954    /// Set a name for VMOs in this buffer collection.
9955    ///
9956    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the VMO itself
9957    /// will be truncated to fit. The name of the VMO will be suffixed with the
9958    /// buffer index within the collection (if the suffix fits within
9959    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
9960    /// listed in the inspect data.
9961    ///
9962    /// The name only affects VMOs allocated after the name is set; this call
9963    /// does not rename existing VMOs. If multiple clients set different names
9964    /// then the name set with the larger priority value wins. Setting a new
9965    /// name with the same priority as a prior name doesn't change the name.
9966    ///
9967    /// All table fields are currently required.
9968    ///
9969    /// + request `priority` The name is only set if this is the first `SetName`
9970    ///   or if `priority` is greater than any previous `priority` value in
9971    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
9972    /// + request `name` The name for VMOs created under this buffer collection.
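    ///
    /// A minimal sketch, assuming the request table fields `priority` and
    /// `name` described above are generated as `Option` fields (the usual FIDL
    /// table mapping); the chosen name and priority value are arbitrary:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::{BufferCollectionTokenGroupProxy, NodeSetNameRequest};
    ///
    /// fn name_collection(group: &BufferCollectionTokenGroupProxy) -> Result<(), fidl::Error> {
    ///     group.set_name(&NodeSetNameRequest {
    ///         priority: Some(1),
    ///         name: Some("example-collection".to_string()),
    ///         ..Default::default()
    ///     })
    /// }
    /// ```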
9973    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
9974        BufferCollectionTokenGroupProxyInterface::r#set_name(self, payload)
9975    }
9976
9977    /// Set information about the current client that can be used by sysmem to
9978    /// help diagnose leaking memory and allocation stalls waiting for a
9979    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
9980    ///
9981    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
9982    /// `Node`(s) derived from this `Node`, unless overridden by
9983    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
9984    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
9985    ///
9986    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
9987    /// `Allocator` is the most efficient way to ensure that all
9988    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
9989    /// set, and is also more efficient than separately sending the same debug
9990    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
9991    /// created [`fuchsia.sysmem2/Node`].
9992    ///
9993    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
9994    /// indicate which client is closing their channel first, leading to subtree
9995    /// failure (which can be normal if the purpose of the subtree is over, but
9996    /// if happening earlier than expected, the client-channel-specific name can
9997    /// help diagnose where the failure is first coming from, from sysmem's
9998    /// point of view).
9999    ///
10000    /// All table fields are currently required.
10001    ///
10002    /// + request `name` This can be an arbitrary string, but the current
10003    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
10004    /// + request `id` This can be an arbitrary id, but the current process ID
10005    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
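    ///
    /// A minimal sketch, assuming the request table fields `name` and `id`
    /// described above are generated as `Option` fields; the values shown are
    /// placeholders rather than a real process name and koid:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::{BufferCollectionTokenGroupProxy, NodeSetDebugClientInfoRequest};
    ///
    /// fn tag_for_debugging(group: &BufferCollectionTokenGroupProxy) -> Result<(), fidl::Error> {
    ///     group.set_debug_client_info(&NodeSetDebugClientInfoRequest {
    ///         name: Some("example-participant".to_string()),
    ///         id: Some(42),
    ///         ..Default::default()
    ///     })
    /// }
    /// ```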
10006    pub fn r#set_debug_client_info(
10007        &self,
10008        mut payload: &NodeSetDebugClientInfoRequest,
10009    ) -> Result<(), fidl::Error> {
10010        BufferCollectionTokenGroupProxyInterface::r#set_debug_client_info(self, payload)
10011    }
10012
10013    /// Sysmem logs a warning if sysmem hasn't seen
10014    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
10015    /// within 5 seconds after creation of a new collection.
10016    ///
10017    /// Clients can call this method to change when the log is printed. If
10018    /// multiple clients set the deadline, it's unspecified which deadline will
10019    /// take effect.
10020    ///
10021    /// In most cases the default works well.
10022    ///
10023    /// All table fields are currently required.
10024    ///
10025    /// + request `deadline` The time at which sysmem will start trying to log
10026    ///   the warning, unless all constraints are with sysmem by then.
10027    pub fn r#set_debug_timeout_log_deadline(
10028        &self,
10029        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
10030    ) -> Result<(), fidl::Error> {
10031        BufferCollectionTokenGroupProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
10032    }
10033
10034    /// This enables verbose logging for the buffer collection.
10035    ///
10036    /// Verbose logging includes constraints set via
10037    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
10038    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
10039    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
10040    /// the tree of `Node`(s).
10041    ///
10042    /// Normally sysmem prints only a single-line complaint when aggregation
10043    /// fails, with just the specific detailed reason that aggregation failed,
10044    /// with little surrounding context.  While this is often enough to diagnose
10045    /// a problem if only a small change was made and everything was working
10046    /// before the small change, it's often not particularly helpful for getting
10047    /// a new buffer collection to work for the first time.  Especially with
10048    /// more complex trees of nodes, involving things like
10049    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
10050    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
10051    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
10052    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
10053    /// looks like and why it's failing a logical allocation, or why a tree or
10054    /// subtree is failing sooner than expected.
10055    ///
10056    /// The intent of the extra logging is to be acceptable from a performance
10057    /// point of view, under the assumption that verbose logging is only enabled
10058    /// on a low number of buffer collections. If we're not tracking down a bug,
10059    /// we shouldn't send this message.
10060    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
10061        BufferCollectionTokenGroupProxyInterface::r#set_verbose_logging(self)
10062    }
10063
10064    /// This gets a handle that can be used as a parameter to
10065    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
10066    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
10067    /// client obtained this handle from this `Node`.
10068    ///
10069    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
10070    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
10071    /// despite the two calls typically being on different channels.
10072    ///
10073    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
10074    ///
10075    /// All table fields are currently required.
10076    ///
10077    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
10078    ///   different `Node` channel, to prove that the client obtained the handle
10079    ///   from this `Node`.
10080    pub fn r#get_node_ref(
10081        &self,
10082    ) -> fidl::client::QueryResponseFut<
10083        NodeGetNodeRefResponse,
10084        fidl::encoding::DefaultFuchsiaResourceDialect,
10085    > {
10086        BufferCollectionTokenGroupProxyInterface::r#get_node_ref(self)
10087    }
10088
10089    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
10090    /// rooted at a different child token of a common parent
10091    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
10092    /// passed-in `node_ref`.
10093    ///
10094    /// This call is for assisting with admission control de-duplication, and
10095    /// with debugging.
10096    ///
10097    /// The `node_ref` must be obtained using
10098    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
10099    ///
10100    /// The `node_ref` can be a duplicated handle; it's not necessary to call
10101    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
10102    ///
10103    /// If a calling token may not actually be a valid token at all due to a
10104    /// potentially hostile/untrusted provider of the token, call
10105    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
10106    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
10107    /// never responds due to a calling token not being a real token (not really
10108    /// talking to sysmem).  Another option is to call
10109    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
10110    /// which also validates the token along with converting it to a
10111    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
10112    ///
10113    /// All table fields are currently required.
10114    ///
10115    /// - response `is_alternate`
10116    ///   - true: The first parent node in common between the calling node and
10117    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
10118    ///     that the calling `Node` and the `node_ref` `Node` will not have both
10119    ///     their constraints apply - rather sysmem will choose one or the other
10120    ///     of the constraints - never both.  This is because only one child of
10121    ///     a `BufferCollectionTokenGroup` is selected during logical
10122    ///     allocation, with only that one child's subtree contributing to
10123    ///     constraints aggregation.
10124    ///   - false: The first parent node in common between the calling `Node`
10125    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
10126    ///     Currently, this means the first parent node in common is a
10127    ///     `BufferCollectionToken` or `BufferCollection` (regardless of whether
10128    ///     `Release` was sent).  This means that the calling `Node` and the `node_ref`
10129    ///     `Node` may have both their constraints apply during constraints
10130    ///     aggregation of the logical allocation, if both `Node`(s) are
10131    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
10132    ///     this case, there is no `BufferCollectionTokenGroup` that will
10133    ///     directly prevent the two `Node`(s) from both being selected and
10134    ///     their constraints both aggregated, but even when false, one or both
10135    ///     `Node`(s) may still be eliminated from consideration if one or both
10136    ///     `Node`(s) has a direct or indirect parent
10137    ///     `BufferCollectionTokenGroup` which selects a child subtree other
10138    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
10139    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
10140    ///   associated with the same buffer collection as the calling `Node`.
10141    ///   Another reason for this error is if the `node_ref` is an
10142    ///   [`zx.Handle:EVENT`] handle with sufficient rights, but isn't actually
10143    ///   a real `node_ref` obtained from `GetNodeRef`.
10144    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
10145    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
10146    ///   the needed rights expected on a real `node_ref`.
10147    /// * No other failing status codes are returned by this call.  However,
10148    ///   sysmem may add additional codes in future, so the client should have
10149    ///   sensible default handling for any failing status code.
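    ///
    /// A minimal sketch combining `GetNodeRef` and `IsAlternateFor`, assuming
    /// the `node_ref` and `is_alternate` table fields described above are
    /// generated as `Option` fields; error handling is simplified:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::{BufferCollectionTokenGroupProxy, NodeIsAlternateForRequest};
    ///
    /// async fn is_alternate(
    ///     node_a: &BufferCollectionTokenGroupProxy,
    ///     node_b: &BufferCollectionTokenGroupProxy,
    /// ) -> Result<bool, fidl::Error> {
    ///     // Obtain a node_ref from node_b; no Sync is needed before using it.
    ///     let node_ref = node_b.get_node_ref().await?.node_ref;
    ///     let result = node_a
    ///         .is_alternate_for(NodeIsAlternateForRequest { node_ref, ..Default::default() })
    ///         .await?;
    ///     match result {
    ///         Ok(response) => Ok(response.is_alternate.unwrap_or(false)),
    ///         // Domain errors such as NOT_FOUND or PROTOCOL_DEVIATION land here.
    ///         Err(domain_error) => {
    ///             eprintln!("IsAlternateFor failed: {:?}", domain_error);
    ///             Ok(false)
    ///         }
    ///     }
    /// }
    /// ```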
10150    pub fn r#is_alternate_for(
10151        &self,
10152        mut payload: NodeIsAlternateForRequest,
10153    ) -> fidl::client::QueryResponseFut<
10154        NodeIsAlternateForResult,
10155        fidl::encoding::DefaultFuchsiaResourceDialect,
10156    > {
10157        BufferCollectionTokenGroupProxyInterface::r#is_alternate_for(self, payload)
10158    }
10159
10160    /// Get the buffer collection ID. This ID is also available from
10161    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
10162    /// within the collection).
10163    ///
10164    /// This call is mainly useful in situations where we can't convey a
10165    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
10166    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
10167    /// handle, which can be joined back up with a `BufferCollection` client end
10168    /// that was created via a different path. Prefer to convey a
10169    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
10170    ///
10171    /// Trusting a `buffer_collection_id` value from a source other than sysmem
10172    /// is analogous to trusting a koid value from a source other than zircon.
10173    /// Both should be avoided unless really necessary, and both require
10174    /// caution. In some situations it may be reasonable to refer to a
10175    /// pre-established `BufferCollection` by `buffer_collection_id` via a
10176    /// protocol for efficiency reasons, but an incoming value purporting to be
10177    /// a `buffer_collection_id` is not sufficient alone to justify granting the
10178    /// sender of the `buffer_collection_id` any capability. The sender must
10179    /// first prove to a receiver that the sender has/had a VMO or has/had a
10180    /// `BufferCollectionToken` to the same collection by sending a handle that
10181    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
10182    /// `buffer_collection_id` value. The receiver should take care to avoid
10183    /// assuming that a sender had a `BufferCollectionToken` in cases where the
10184    /// sender has only proven that the sender had a VMO.
10185    ///
10186    /// - response `buffer_collection_id` This ID is unique per buffer
10187    ///   collection per boot. Each buffer is uniquely identified by the
10188    ///   `buffer_collection_id` and `buffer_index` together.
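    ///
    /// A minimal sketch, assuming the response table field
    /// `buffer_collection_id` is generated as an `Option<u64>`:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::BufferCollectionTokenGroupProxy;
    ///
    /// async fn log_collection_id(group: &BufferCollectionTokenGroupProxy) -> Result<(), fidl::Error> {
    ///     let response = group.get_buffer_collection_id().await?;
    ///     if let Some(id) = response.buffer_collection_id {
    ///         println!("buffer_collection_id: {}", id);
    ///     }
    ///     Ok(())
    /// }
    /// ```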
10189    pub fn r#get_buffer_collection_id(
10190        &self,
10191    ) -> fidl::client::QueryResponseFut<
10192        NodeGetBufferCollectionIdResponse,
10193        fidl::encoding::DefaultFuchsiaResourceDialect,
10194    > {
10195        BufferCollectionTokenGroupProxyInterface::r#get_buffer_collection_id(self)
10196    }
10197
10198    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
10199    /// created after this message to weak, which means that a client's `Node`
10200    /// client end (or a child created after this message) is not alone
10201    /// sufficient to keep allocated VMOs alive.
10202    ///
10203    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
10204    /// `close_weak_asap`.
10205    ///
10206    /// This message is only permitted before the `Node` becomes ready for
10207    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
10208    ///   * `BufferCollectionToken`: any time
10209    ///   * `BufferCollection`: before `SetConstraints`
10210    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
10211    ///
10212    /// Currently, no conversion from strong `Node` to weak `Node` after ready
10213    /// for allocation is provided, but a client can simulate that by creating
10214    /// an additional `Node` before allocation and setting that additional
10215    /// `Node` to weak, and then potentially at some point later sending
10216    /// `Release` and closing the client end of the client's strong `Node`, but
10217    /// keeping the client's weak `Node`.
10218    ///
10219    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
10220    /// collection failure (all `Node` client end(s) will see
10221    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
10222    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
10223    /// this situation until all `Node`(s) are ready for allocation. For initial
10224    /// allocation to succeed, at least one strong `Node` is required to exist
10225    /// at allocation time; after a client has received VMO handles, that
10226    /// client can `BufferCollection.Release` and close the client end without
10227    /// causing this type of failure.
10228    ///
10229    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
10230    /// imply `SetWeakOk` with `for_child_nodes_also` true, which can be sent
10231    /// separately as appropriate.
10232    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
10233        BufferCollectionTokenGroupProxyInterface::r#set_weak(self)
10234    }
10235
10236    /// This indicates to sysmem that the client is prepared to pay attention to
10237    /// `close_weak_asap`.
10238    ///
10239    /// If sent, this message must be before
10240    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
10241    ///
10242    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
10243    /// send this message before `WaitForAllBuffersAllocated`, or a parent
10244    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
10245    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
10246    /// trigger buffer collection failure.
10247    ///
10248    /// This message is necessary because weak sysmem VMOs have not always been
10249    /// a thing, so older clients are not aware of the need to pay attention to
10250    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
10251    /// sysmem weak VMO handles asap. By having this message and requiring
10252    /// participants to indicate their acceptance of this aspect of the overall
10253    /// protocol, we avoid situations where an older client is delivered a weak
10254    /// VMO without any way for sysmem to get that VMO to close quickly later
10255    /// (and on a per-buffer basis).
10256    ///
10257    /// A participant that doesn't handle `close_weak_asap` and also doesn't
10258    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
10259    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
10260    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
10261    /// same participant has a child/delegate which does retrieve VMOs, that
10262    /// child/delegate will need to send `SetWeakOk` before
10263    /// `WaitForAllBuffersAllocated`.
10264    ///
10265    /// + request `for_child_nodes_also` If present and true, this means direct
10266    ///   child nodes of this node created after this message plus all
10267    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
10268    ///   those nodes. Any child node of this node that was created before this
10269    ///   message is not included. This setting is "sticky" in the sense that a
10270    ///   subsequent `SetWeakOk` without this bool set to true does not reset
10271    ///   the server-side bool. If this creates a problem for a participant, a
10272    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
10273    ///   tokens instead, as appropriate. A participant should only set
10274    ///   `for_child_nodes_also` true if the participant can really promise to
10275    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
10276    ///   weak VMO handles held by participants holding the corresponding child
10277    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
10278    ///   which are using sysmem(1) can be weak, despite the clients of those
10279    ///   sysmem(1) `Node`(s) not having any direct way to `SetWeakOk` or any
10280    ///   direct way to find out about `close_weak_asap`. This only applies to
10281    ///   descendants of this `Node` which are using sysmem(1), not to this
10282    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
10283    ///   token, which will fail allocation unless an ancestor of this `Node`
10284    ///   specified `for_child_nodes_also` true.
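    ///
    /// A minimal sketch, assuming the request table field
    /// `for_child_nodes_also` described above is generated as an
    /// `Option<bool>`:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::{BufferCollectionTokenGroupProxy, NodeSetWeakOkRequest};
    ///
    /// fn accept_weak_vmos(group: &BufferCollectionTokenGroupProxy) -> Result<(), fidl::Error> {
    ///     group.set_weak_ok(NodeSetWeakOkRequest {
    ///         // Also covers participants holding child Nodes created after this
    ///         // message; only set this if they really obey close_weak_asap.
    ///         for_child_nodes_also: Some(true),
    ///         ..Default::default()
    ///     })
    /// }
    /// ```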
10285    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
10286        BufferCollectionTokenGroupProxyInterface::r#set_weak_ok(self, payload)
10287    }
10288
10289    /// The server_end will be closed after this `Node` and any child nodes
10290    /// have released their buffer counts, making those counts available for
10291    /// reservation by a different `Node` via
10292    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
10293    ///
10294    /// The `Node` buffer counts may not be released until the entire tree of
10295    /// `Node`(s) is closed or failed, because
10296    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
10297    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
10298    /// `Node` buffer counts remain reserved until the orphaned node is later
10299    /// cleaned up.
10300    ///
10301    /// If the `Node` exceeds a fairly large number of attached eventpair server
10302    /// ends, a log message will indicate this and the `Node` (and the
10303    /// appropriate sub-tree) will fail.
10304    ///
10305    /// The `server_end` will remain open when
10306    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
10307    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
10308    /// [`fuchsia.sysmem2/BufferCollection`].
10309    ///
10310    /// This message can also be used with a
10311    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
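    ///
    /// A minimal sketch, assuming the request table field `server_end` is
    /// generated as an `Option<fidl::EventPair>` and that
    /// `zx::EventPair::create()` has its current infallible signature:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::{BufferCollectionTokenGroupProxy, NodeAttachNodeTrackingRequest};
    ///
    /// fn track_node_lifetime(
    ///     group: &BufferCollectionTokenGroupProxy,
    /// ) -> Result<zx::EventPair, fidl::Error> {
    ///     let (client_end, server_end) = zx::EventPair::create();
    ///     group.attach_node_tracking(NodeAttachNodeTrackingRequest {
    ///         server_end: Some(server_end),
    ///         ..Default::default()
    ///     })?;
    ///     // ZX_EVENTPAIR_PEER_CLOSED on client_end indicates this Node's buffer
    ///     // counts have been released.
    ///     Ok(client_end)
    /// }
    /// ```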
10312    pub fn r#attach_node_tracking(
10313        &self,
10314        mut payload: NodeAttachNodeTrackingRequest,
10315    ) -> Result<(), fidl::Error> {
10316        BufferCollectionTokenGroupProxyInterface::r#attach_node_tracking(self, payload)
10317    }
10318
10319    /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
10320    /// (including its children) will be selected during allocation (or logical
10321    /// allocation).
10322    ///
10323    /// Before passing the client end of this token to
10324    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
10325    /// [`fuchsia.sysmem2/Node.Sync`] after
10326    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
10327    /// Or the client can use
10328    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
10329    /// essentially includes the `Sync`.
10330    ///
10331    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
10332    /// fail the group's subtree and close the connection.
10333    ///
10334    /// After all children have been created, send AllChildrenPresent.
10335    ///
10336    /// + request `token_request` The server end of the new token channel.
10337    /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
10338    ///   token allows the holder to get the same rights to buffers as the
10339    ///   parent token (of the group) had. When the value isn't
10340    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
10341    ///   bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
10342    ///   for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
10343    ///   causes subtree failure.
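    ///
    /// A minimal sketch, assuming the request table fields `token_request` and
    /// `rights_attenuation_mask` described above are generated as `Option`
    /// fields (the mask typed as `fidl::Rights`), and that
    /// `fidl::endpoints::create_endpoints` has its current infallible
    /// signature:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::{
    ///     BufferCollectionTokenGroupCreateChildRequest, BufferCollectionTokenGroupProxy,
    ///     BufferCollectionTokenMarker,
    /// };
    ///
    /// async fn add_child(
    ///     group: &BufferCollectionTokenGroupProxy,
    /// ) -> Result<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>, fidl::Error> {
    ///     let (token_client, token_server) =
    ///         fidl::endpoints::create_endpoints::<BufferCollectionTokenMarker>();
    ///     group.create_child(BufferCollectionTokenGroupCreateChildRequest {
    ///         token_request: Some(token_server),
    ///         rights_attenuation_mask: Some(fidl::Rights::SAME_RIGHTS),
    ///         ..Default::default()
    ///     })?;
    ///     // Complete a Sync before handing token_client to another participant.
    ///     group.sync().await?;
    ///     Ok(token_client)
    /// }
    /// ```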
10344    pub fn r#create_child(
10345        &self,
10346        mut payload: BufferCollectionTokenGroupCreateChildRequest,
10347    ) -> Result<(), fidl::Error> {
10348        BufferCollectionTokenGroupProxyInterface::r#create_child(self, payload)
10349    }
10350
10351    /// Create 1 or more child tokens at once, synchronously.  In contrast to
10352    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
10353    /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
10354    /// of a returned token to
10355    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
10356    ///
10357    /// The lower-index child tokens are higher priority (attempted sooner) than
10358    /// higher-index child tokens.
10359    ///
10360    /// As per all child tokens, successful aggregation will choose exactly one
10361    /// child among all created children (across all children created across
10362    /// potentially multiple calls to
10363    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
10364    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
10365    ///
10366    /// The maximum permissible total number of children per group, and total
10367    /// number of nodes in an overall tree (from the root) are capped to limits
10368    /// which are not configurable via these protocols.
10369    ///
10370    /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
10371    /// this will fail the group's subtree and close the connection.
10372    ///
10373    /// After all children have been created, send AllChildrenPresent.
10374    ///
10375    /// + request `rights_attenuation_masks` The size of the
10376    ///   `rights_attenuation_masks` determines the number of created child
10377    ///   tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
10378    ///   The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
10379    ///   other value, each 0 bit in the mask attenuates that right.
10380    /// - response `tokens` The created child tokens.
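    ///
    /// A minimal sketch, assuming the request field `rights_attenuation_masks`
    /// is generated as an `Option<Vec<fidl::Rights>>` and the response field
    /// `tokens` as an `Option` of client ends:
    ///
    /// ```ignore
    /// use fidl_fuchsia_sysmem2::{
    ///     BufferCollectionTokenGroupCreateChildrenSyncRequest, BufferCollectionTokenGroupProxy,
    ///     BufferCollectionTokenMarker,
    /// };
    ///
    /// async fn add_two_children(
    ///     group: &BufferCollectionTokenGroupProxy,
    /// ) -> Result<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>, fidl::Error> {
    ///     let response = group
    ///         .create_children_sync(&BufferCollectionTokenGroupCreateChildrenSyncRequest {
    ///             // Two children with un-attenuated rights; no separate Sync is
    ///             // needed before sending these tokens onward.
    ///             rights_attenuation_masks: Some(vec![fidl::Rights::SAME_RIGHTS; 2]),
    ///             ..Default::default()
    ///         })
    ///         .await?;
    ///     Ok(response.tokens.unwrap_or_default())
    /// }
    /// ```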
10381    pub fn r#create_children_sync(
10382        &self,
10383        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
10384    ) -> fidl::client::QueryResponseFut<
10385        BufferCollectionTokenGroupCreateChildrenSyncResponse,
10386        fidl::encoding::DefaultFuchsiaResourceDialect,
10387    > {
10388        BufferCollectionTokenGroupProxyInterface::r#create_children_sync(self, payload)
10389    }
10390
10391    /// Indicate that no more children will be created.
10392    ///
10393    /// After creating all children, the client should send
10394    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
10395    /// inform sysmem that no more children will be created, so that sysmem can
10396    /// know when it's ok to start aggregating constraints.
10397    ///
10398    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
10399    /// fail the group's subtree and close the connection.
10400    ///
10401    /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
10402    /// after `AllChildrenPresent`, else failure of the group's subtree will be
10403    /// triggered. This is intentionally not analogous to how `Release` without
10404    /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
10405    /// subtree failure.
10406    pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
10407        BufferCollectionTokenGroupProxyInterface::r#all_children_present(self)
10408    }
10409}
10410
10411impl BufferCollectionTokenGroupProxyInterface for BufferCollectionTokenGroupProxy {
10412    type SyncResponseFut =
10413        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
10414    fn r#sync(&self) -> Self::SyncResponseFut {
10415        fn _decode(
10416            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
10417        ) -> Result<(), fidl::Error> {
10418            let _response = fidl::client::decode_transaction_body::<
10419                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
10420                fidl::encoding::DefaultFuchsiaResourceDialect,
10421                0x11ac2555cf575b54,
10422            >(_buf?)?
10423            .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
10424            Ok(_response)
10425        }
10426        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
10427            (),
10428            0x11ac2555cf575b54,
10429            fidl::encoding::DynamicFlags::FLEXIBLE,
10430            _decode,
10431        )
10432    }
10433
10434    fn r#release(&self) -> Result<(), fidl::Error> {
10435        self.client.send::<fidl::encoding::EmptyPayload>(
10436            (),
10437            0x6a5cae7d6d6e04c6,
10438            fidl::encoding::DynamicFlags::FLEXIBLE,
10439        )
10440    }
10441
10442    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
10443        self.client.send::<NodeSetNameRequest>(
10444            payload,
10445            0xb41f1624f48c1e9,
10446            fidl::encoding::DynamicFlags::FLEXIBLE,
10447        )
10448    }
10449
10450    fn r#set_debug_client_info(
10451        &self,
10452        mut payload: &NodeSetDebugClientInfoRequest,
10453    ) -> Result<(), fidl::Error> {
10454        self.client.send::<NodeSetDebugClientInfoRequest>(
10455            payload,
10456            0x5cde8914608d99b1,
10457            fidl::encoding::DynamicFlags::FLEXIBLE,
10458        )
10459    }
10460
10461    fn r#set_debug_timeout_log_deadline(
10462        &self,
10463        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
10464    ) -> Result<(), fidl::Error> {
10465        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
10466            payload,
10467            0x716b0af13d5c0806,
10468            fidl::encoding::DynamicFlags::FLEXIBLE,
10469        )
10470    }
10471
10472    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
10473        self.client.send::<fidl::encoding::EmptyPayload>(
10474            (),
10475            0x5209c77415b4dfad,
10476            fidl::encoding::DynamicFlags::FLEXIBLE,
10477        )
10478    }
10479
10480    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
10481        NodeGetNodeRefResponse,
10482        fidl::encoding::DefaultFuchsiaResourceDialect,
10483    >;
10484    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
10485        fn _decode(
10486            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
10487        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
10488            let _response = fidl::client::decode_transaction_body::<
10489                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
10490                fidl::encoding::DefaultFuchsiaResourceDialect,
10491                0x5b3d0e51614df053,
10492            >(_buf?)?
10493            .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
10494            Ok(_response)
10495        }
10496        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
10497            (),
10498            0x5b3d0e51614df053,
10499            fidl::encoding::DynamicFlags::FLEXIBLE,
10500            _decode,
10501        )
10502    }
10503
10504    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
10505        NodeIsAlternateForResult,
10506        fidl::encoding::DefaultFuchsiaResourceDialect,
10507    >;
10508    fn r#is_alternate_for(
10509        &self,
10510        mut payload: NodeIsAlternateForRequest,
10511    ) -> Self::IsAlternateForResponseFut {
10512        fn _decode(
10513            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
10514        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
10515            let _response = fidl::client::decode_transaction_body::<
10516                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
10517                fidl::encoding::DefaultFuchsiaResourceDialect,
10518                0x3a58e00157e0825,
10519            >(_buf?)?
10520            .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
10521            Ok(_response.map(|x| x))
10522        }
10523        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
10524            &mut payload,
10525            0x3a58e00157e0825,
10526            fidl::encoding::DynamicFlags::FLEXIBLE,
10527            _decode,
10528        )
10529    }
10530
10531    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
10532        NodeGetBufferCollectionIdResponse,
10533        fidl::encoding::DefaultFuchsiaResourceDialect,
10534    >;
10535    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
10536        fn _decode(
10537            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
10538        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
10539            let _response = fidl::client::decode_transaction_body::<
10540                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
10541                fidl::encoding::DefaultFuchsiaResourceDialect,
10542                0x77d19a494b78ba8c,
10543            >(_buf?)?
10544            .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
10545            Ok(_response)
10546        }
10547        self.client.send_query_and_decode::<
10548            fidl::encoding::EmptyPayload,
10549            NodeGetBufferCollectionIdResponse,
10550        >(
10551            (),
10552            0x77d19a494b78ba8c,
10553            fidl::encoding::DynamicFlags::FLEXIBLE,
10554            _decode,
10555        )
10556    }
10557
10558    fn r#set_weak(&self) -> Result<(), fidl::Error> {
10559        self.client.send::<fidl::encoding::EmptyPayload>(
10560            (),
10561            0x22dd3ea514eeffe1,
10562            fidl::encoding::DynamicFlags::FLEXIBLE,
10563        )
10564    }
10565
10566    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
10567        self.client.send::<NodeSetWeakOkRequest>(
10568            &mut payload,
10569            0x38a44fc4d7724be9,
10570            fidl::encoding::DynamicFlags::FLEXIBLE,
10571        )
10572    }
10573
10574    fn r#attach_node_tracking(
10575        &self,
10576        mut payload: NodeAttachNodeTrackingRequest,
10577    ) -> Result<(), fidl::Error> {
10578        self.client.send::<NodeAttachNodeTrackingRequest>(
10579            &mut payload,
10580            0x3f22f2a293d3cdac,
10581            fidl::encoding::DynamicFlags::FLEXIBLE,
10582        )
10583    }
10584
10585    fn r#create_child(
10586        &self,
10587        mut payload: BufferCollectionTokenGroupCreateChildRequest,
10588    ) -> Result<(), fidl::Error> {
10589        self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
10590            &mut payload,
10591            0x41a0075d419f30c5,
10592            fidl::encoding::DynamicFlags::FLEXIBLE,
10593        )
10594    }
10595
10596    type CreateChildrenSyncResponseFut = fidl::client::QueryResponseFut<
10597        BufferCollectionTokenGroupCreateChildrenSyncResponse,
10598        fidl::encoding::DefaultFuchsiaResourceDialect,
10599    >;
10600    fn r#create_children_sync(
10601        &self,
10602        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
10603    ) -> Self::CreateChildrenSyncResponseFut {
10604        fn _decode(
10605            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
10606        ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
10607            let _response = fidl::client::decode_transaction_body::<
10608                fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
10609                fidl::encoding::DefaultFuchsiaResourceDialect,
10610                0x15dea448c536070a,
10611            >(_buf?)?
10612            .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
10613            Ok(_response)
10614        }
10615        self.client.send_query_and_decode::<
10616            BufferCollectionTokenGroupCreateChildrenSyncRequest,
10617            BufferCollectionTokenGroupCreateChildrenSyncResponse,
10618        >(
10619            payload,
10620            0x15dea448c536070a,
10621            fidl::encoding::DynamicFlags::FLEXIBLE,
10622            _decode,
10623        )
10624    }
10625
10626    fn r#all_children_present(&self) -> Result<(), fidl::Error> {
10627        self.client.send::<fidl::encoding::EmptyPayload>(
10628            (),
10629            0x5c327e4a23391312,
10630            fidl::encoding::DynamicFlags::FLEXIBLE,
10631        )
10632    }
10633}
10634
10635pub struct BufferCollectionTokenGroupEventStream {
10636    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
10637}
10638
10639impl std::marker::Unpin for BufferCollectionTokenGroupEventStream {}
10640
10641impl futures::stream::FusedStream for BufferCollectionTokenGroupEventStream {
10642    fn is_terminated(&self) -> bool {
10643        self.event_receiver.is_terminated()
10644    }
10645}
10646
10647impl futures::Stream for BufferCollectionTokenGroupEventStream {
10648    type Item = Result<BufferCollectionTokenGroupEvent, fidl::Error>;
10649
10650    fn poll_next(
10651        mut self: std::pin::Pin<&mut Self>,
10652        cx: &mut std::task::Context<'_>,
10653    ) -> std::task::Poll<Option<Self::Item>> {
10654        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
10655            &mut self.event_receiver,
10656            cx
10657        )?) {
10658            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenGroupEvent::decode(buf))),
10659            None => std::task::Poll::Ready(None),
10660        }
10661    }
10662}
10663
10664#[derive(Debug)]
10665pub enum BufferCollectionTokenGroupEvent {
10666    #[non_exhaustive]
10667    _UnknownEvent {
10668        /// Ordinal of the event that was sent.
10669        ordinal: u64,
10670    },
10671}
10672
10673impl BufferCollectionTokenGroupEvent {
10674    /// Decodes a message buffer as a [`BufferCollectionTokenGroupEvent`].
10675    fn decode(
10676        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
10677    ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
10678        let (bytes, _handles) = buf.split_mut();
10679        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
10680        debug_assert_eq!(tx_header.tx_id, 0);
10681        match tx_header.ordinal {
10682            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
10683                Ok(BufferCollectionTokenGroupEvent::_UnknownEvent {
10684                    ordinal: tx_header.ordinal,
10685                })
10686            }
10687            _ => Err(fidl::Error::UnknownOrdinal {
10688                ordinal: tx_header.ordinal,
10689                protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
10690            })
10691        }
10692    }
10693}
10694
10695/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionTokenGroup.
10696pub struct BufferCollectionTokenGroupRequestStream {
10697    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
10698    is_terminated: bool,
10699}
10700
10701impl std::marker::Unpin for BufferCollectionTokenGroupRequestStream {}
10702
10703impl futures::stream::FusedStream for BufferCollectionTokenGroupRequestStream {
10704    fn is_terminated(&self) -> bool {
10705        self.is_terminated
10706    }
10707}
10708
10709impl fidl::endpoints::RequestStream for BufferCollectionTokenGroupRequestStream {
10710    type Protocol = BufferCollectionTokenGroupMarker;
10711    type ControlHandle = BufferCollectionTokenGroupControlHandle;
10712
10713    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
10714        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
10715    }
10716
10717    fn control_handle(&self) -> Self::ControlHandle {
10718        BufferCollectionTokenGroupControlHandle { inner: self.inner.clone() }
10719    }
10720
10721    fn into_inner(
10722        self,
10723    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
10724    {
10725        (self.inner, self.is_terminated)
10726    }
10727
10728    fn from_inner(
10729        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
10730        is_terminated: bool,
10731    ) -> Self {
10732        Self { inner, is_terminated }
10733    }
10734}
10735
10736impl futures::Stream for BufferCollectionTokenGroupRequestStream {
10737    type Item = Result<BufferCollectionTokenGroupRequest, fidl::Error>;
10738
10739    fn poll_next(
10740        mut self: std::pin::Pin<&mut Self>,
10741        cx: &mut std::task::Context<'_>,
10742    ) -> std::task::Poll<Option<Self::Item>> {
10743        let this = &mut *self;
10744        if this.inner.check_shutdown(cx) {
10745            this.is_terminated = true;
10746            return std::task::Poll::Ready(None);
10747        }
10748        if this.is_terminated {
10749            panic!("polled BufferCollectionTokenGroupRequestStream after completion");
10750        }
10751        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
10752            |bytes, handles| {
10753                match this.inner.channel().read_etc(cx, bytes, handles) {
10754                    std::task::Poll::Ready(Ok(())) => {}
10755                    std::task::Poll::Pending => return std::task::Poll::Pending,
10756                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
10757                        this.is_terminated = true;
10758                        return std::task::Poll::Ready(None);
10759                    }
10760                    std::task::Poll::Ready(Err(e)) => {
10761                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
10762                            e.into(),
10763                        ))))
10764                    }
10765                }
10766
10767                // A message has been received from the channel
10768                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
10769
10770                std::task::Poll::Ready(Some(match header.ordinal {
10771                0x11ac2555cf575b54 => {
10772                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
10773                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10774                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
10775                    let control_handle = BufferCollectionTokenGroupControlHandle {
10776                        inner: this.inner.clone(),
10777                    };
10778                    Ok(BufferCollectionTokenGroupRequest::Sync {
10779                        responder: BufferCollectionTokenGroupSyncResponder {
10780                            control_handle: std::mem::ManuallyDrop::new(control_handle),
10781                            tx_id: header.tx_id,
10782                        },
10783                    })
10784                }
10785                0x6a5cae7d6d6e04c6 => {
10786                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10787                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10788                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
10789                    let control_handle = BufferCollectionTokenGroupControlHandle {
10790                        inner: this.inner.clone(),
10791                    };
10792                    Ok(BufferCollectionTokenGroupRequest::Release {
10793                        control_handle,
10794                    })
10795                }
10796                0xb41f1624f48c1e9 => {
10797                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10798                    let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10799                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
10800                    let control_handle = BufferCollectionTokenGroupControlHandle {
10801                        inner: this.inner.clone(),
10802                    };
10803                    Ok(BufferCollectionTokenGroupRequest::SetName {payload: req,
10804                        control_handle,
10805                    })
10806                }
10807                0x5cde8914608d99b1 => {
10808                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10809                    let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10810                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
10811                    let control_handle = BufferCollectionTokenGroupControlHandle {
10812                        inner: this.inner.clone(),
10813                    };
10814                    Ok(BufferCollectionTokenGroupRequest::SetDebugClientInfo {payload: req,
10815                        control_handle,
10816                    })
10817                }
10818                0x716b0af13d5c0806 => {
10819                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10820                    let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10821                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
10822                    let control_handle = BufferCollectionTokenGroupControlHandle {
10823                        inner: this.inner.clone(),
10824                    };
10825                    Ok(BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {payload: req,
10826                        control_handle,
10827                    })
10828                }
10829                0x5209c77415b4dfad => {
10830                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10831                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10832                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
10833                    let control_handle = BufferCollectionTokenGroupControlHandle {
10834                        inner: this.inner.clone(),
10835                    };
10836                    Ok(BufferCollectionTokenGroupRequest::SetVerboseLogging {
10837                        control_handle,
10838                    })
10839                }
10840                0x5b3d0e51614df053 => {
10841                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
10842                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10843                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
10844                    let control_handle = BufferCollectionTokenGroupControlHandle {
10845                        inner: this.inner.clone(),
10846                    };
10847                    Ok(BufferCollectionTokenGroupRequest::GetNodeRef {
10848                        responder: BufferCollectionTokenGroupGetNodeRefResponder {
10849                            control_handle: std::mem::ManuallyDrop::new(control_handle),
10850                            tx_id: header.tx_id,
10851                        },
10852                    })
10853                }
10854                0x3a58e00157e0825 => {
10855                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
10856                    let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10857                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
10858                    let control_handle = BufferCollectionTokenGroupControlHandle {
10859                        inner: this.inner.clone(),
10860                    };
10861                    Ok(BufferCollectionTokenGroupRequest::IsAlternateFor {payload: req,
10862                        responder: BufferCollectionTokenGroupIsAlternateForResponder {
10863                            control_handle: std::mem::ManuallyDrop::new(control_handle),
10864                            tx_id: header.tx_id,
10865                        },
10866                    })
10867                }
10868                0x77d19a494b78ba8c => {
10869                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
10870                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10871                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
10872                    let control_handle = BufferCollectionTokenGroupControlHandle {
10873                        inner: this.inner.clone(),
10874                    };
10875                    Ok(BufferCollectionTokenGroupRequest::GetBufferCollectionId {
10876                        responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder {
10877                            control_handle: std::mem::ManuallyDrop::new(control_handle),
10878                            tx_id: header.tx_id,
10879                        },
10880                    })
10881                }
10882                0x22dd3ea514eeffe1 => {
10883                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10884                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10885                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
10886                    let control_handle = BufferCollectionTokenGroupControlHandle {
10887                        inner: this.inner.clone(),
10888                    };
10889                    Ok(BufferCollectionTokenGroupRequest::SetWeak {
10890                        control_handle,
10891                    })
10892                }
10893                0x38a44fc4d7724be9 => {
10894                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10895                    let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10896                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
10897                    let control_handle = BufferCollectionTokenGroupControlHandle {
10898                        inner: this.inner.clone(),
10899                    };
10900                    Ok(BufferCollectionTokenGroupRequest::SetWeakOk {payload: req,
10901                        control_handle,
10902                    })
10903                }
10904                0x3f22f2a293d3cdac => {
10905                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10906                    let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10907                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
10908                    let control_handle = BufferCollectionTokenGroupControlHandle {
10909                        inner: this.inner.clone(),
10910                    };
10911                    Ok(BufferCollectionTokenGroupRequest::AttachNodeTracking {payload: req,
10912                        control_handle,
10913                    })
10914                }
10915                0x41a0075d419f30c5 => {
10916                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10917                    let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10918                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildRequest>(&header, _body_bytes, handles, &mut req)?;
10919                    let control_handle = BufferCollectionTokenGroupControlHandle {
10920                        inner: this.inner.clone(),
10921                    };
10922                    Ok(BufferCollectionTokenGroupRequest::CreateChild {payload: req,
10923                        control_handle,
10924                    })
10925                }
10926                0x15dea448c536070a => {
10927                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
10928                    let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildrenSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10929                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildrenSyncRequest>(&header, _body_bytes, handles, &mut req)?;
10930                    let control_handle = BufferCollectionTokenGroupControlHandle {
10931                        inner: this.inner.clone(),
10932                    };
10933                    Ok(BufferCollectionTokenGroupRequest::CreateChildrenSync {payload: req,
10934                        responder: BufferCollectionTokenGroupCreateChildrenSyncResponder {
10935                            control_handle: std::mem::ManuallyDrop::new(control_handle),
10936                            tx_id: header.tx_id,
10937                        },
10938                    })
10939                }
10940                0x5c327e4a23391312 => {
10941                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10942                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10943                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
10944                    let control_handle = BufferCollectionTokenGroupControlHandle {
10945                        inner: this.inner.clone(),
10946                    };
10947                    Ok(BufferCollectionTokenGroupRequest::AllChildrenPresent {
10948                        control_handle,
10949                    })
10950                }
10951                _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
10952                    Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
10953                        ordinal: header.ordinal,
10954                        control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
10955                        method_type: fidl::MethodType::OneWay,
10956                    })
10957                }
10958                _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
10959                    this.inner.send_framework_err(
10960                        fidl::encoding::FrameworkErr::UnknownMethod,
10961                        header.tx_id,
10962                        header.ordinal,
10963                        header.dynamic_flags(),
10964                        (bytes, handles),
10965                    )?;
10966                    Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
10967                        ordinal: header.ordinal,
10968                        control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
10969                        method_type: fidl::MethodType::TwoWay,
10970                    })
10971                }
10972                _ => Err(fidl::Error::UnknownOrdinal {
10973                    ordinal: header.ordinal,
10974                    protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
10975                }),
10976            }))
10977            },
10978        )
10979    }
10980}
10981
10982/// The sysmem implementation is consistent with a logical / conceptual model of
10983/// allocation / logical allocation as follows:
10984///
10985/// As usual, a logical allocation considers either the root and all nodes with
10986/// connectivity to the root that don't transit a [`fuchsia.sysmem2/Node`]
10987/// created with [`fuchsia.sysmem2/BufferCollection.AttachToken`], or a subtree
10988/// rooted at an `AttachToken` `Node` and all `Node`(s) with connectivity to
10989/// that subtree that don't transit another `AttachToken`.  This is called the
10990/// logical allocation pruned subtree, or pruned subtree for short.
10991///
10992/// During constraints aggregation, each
10993/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] will select a single child
10994/// `Node` among its direct children. The rest of the children will appear to
10995/// fail the logical allocation, while the selected child may succeed.
10996///
10997/// When more than one `BufferCollectionTokenGroup` exists in the overall
10998/// logical allocation pruned subtree, the relative priority between two groups
10999/// is equivalent to their ordering in a DFS pre-order iteration of the tree,
11000/// with parents higher priority than children, and left children higher
11001/// priority than right children.
11002///
11003/// When a particular child of a group is selected (whether provisionally during
11004/// a constraints aggregation attempt, or as a final selection), the
11005/// non-selection of other children of the group will "hide" any other groups
11006/// under those non-selected children.
11007///
11008/// Within a logical allocation, aggregation is attempted first by provisionally
11009/// selecting child 0 of the highest-priority group, and child 0 of the next
11010/// highest-priority group that isn't hidden by the provisional selections so
11011/// far, etc.
11012///
11013/// If that aggregation attempt fails, aggregation will be attempted with the
11014/// ordinal 0 child of all the same groups except the lowest-priority non-hidden
11015/// group, which will provisionally select its ordinal 1 child (and then child 2
11016/// and so on). If a new lowest-priority group is un-hidden as provisional
11017/// selections are updated, that newly un-hidden lowest-priority group has all
11018/// its children considered in order, before changing the provisional selection
11019/// in the former lowest-priority group. In terms of result, this is equivalent
11020/// to systematic enumeration of all possible combinations of choices in a
11021/// counting-like order updating the lowest-priority group the most often and
11022/// the highest-priority group the least often. Rather than actually attempting
11023/// aggregation with all the combinations, we can skip over combinations which
11024/// are redundant/equivalent due to hiding without any change to the result.
11025///
11026/// Attempted constraint aggregations of enumerated non-equivalent combinations
11027/// of choices continue in this manner until either (a) all aggregation attempts
11028/// fail, in which case the overall logical allocation fails, or (b) an
11029/// attempted aggregation succeeds, in which case buffer allocation (if needed;
11030/// if this is the pruned subtree rooted at the overall root `Node`) is
11031/// attempted once. If buffer allocation based on the first successful
11032/// constraints aggregation fails, the overall logical allocation fails (there
11033/// is no buffer allocation retry / re-attempt). If buffer allocation succeeds
11034/// (or is not needed due to being a pruned subtree that doesn't include the
11035/// root), the logical allocation succeeds.
11036///
11037/// If this prioritization scheme cannot reasonably work for your usage of
11038/// sysmem, please don't hesitate to contact sysmem folks to discuss potentially
11039/// adding a way to achieve what you need.
11040///
11041/// Please avoid creating a large number of `BufferCollectionTokenGroup`(s) per
11042/// logical allocation, especially with a large number of children overall, and
11043/// especially in cases where aggregation may reasonably be expected to often
11044/// fail using ordinal 0 children and possibly with later children as well.
11045/// Sysmem mitigates potentially high time complexity of evaluating too many
11046/// child combinations/selections across too many groups by simply failing
11047/// logical allocation beyond a certain (fairly high, but not huge) max number
11048/// of considered group child combinations/selections. More advanced (and more
11049/// complicated) mitigation is not anticipated to be practically necessary or
11050/// worth the added complexity. Please contact sysmem folks if the max limit is
11051/// getting hit or if you anticipate it getting hit, to discuss potential
11052/// options.
11053///
11054/// Prefer to use multiple [`fuchsia.sysmem2/ImageFormatConstraints`] in a
11055/// single [`fuchsia.sysmem2/BufferCollectionConstraints`] when feasible (when a
11056/// participant just needs to express the ability to work with more than a
11057/// single [`fuchsia.images2/PixelFormat`], with sysmem choosing which
11058/// `PixelFormat` to use among those supported by all participants).
11059///
11060/// Similar to [`fuchsia.sysmem2/BufferCollectionToken`] and
11061/// [`fuchsia.sysmem2/BufferCollection`], closure of the
11062/// `BufferCollectionTokenGroup` channel without sending
11063/// [`fuchsia.sysmem2/Node.Release`] first will cause buffer collection failure
11064/// (or subtree failure if using
11065/// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11066/// [`fuchsia.sysmem2/BufferCollection.AttachToken`] and the
11067/// `BufferCollectionTokenGroup` is part of a subtree under such a node that
11068/// doesn't propagate failure to its parent).
11069///
11070/// Epitaphs are not used in this protocol.
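///
/// The "counting-like order" described above can be illustrated with a small
/// standalone sketch. This is not sysmem's implementation (in particular it
/// ignores hiding); it only shows the order in which child selections would be
/// enumerated, with the lowest-priority group acting as the least-significant
/// digit:
///
/// ```
/// // Hypothetical child counts per group, highest priority first.
/// let child_counts = [2usize, 3usize];
/// let total: usize = child_counts.iter().product();
/// let mut combos = Vec::new();
/// for mut n in 0..total {
///     let mut combo = vec![0usize; child_counts.len()];
///     // Fill from the lowest-priority group (last) toward the highest.
///     for (slot, &count) in combo.iter_mut().zip(&child_counts).rev() {
///         *slot = n % count;
///         n /= count;
///     }
///     combos.push(combo);
/// }
/// assert_eq!(combos[0], vec![0, 0]); // first attempt: ordinal 0 everywhere
/// assert_eq!(combos[1], vec![0, 1]); // lowest-priority group advances first
/// assert_eq!(combos.last().unwrap(), &vec![1, 2]);
/// ```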
11071#[derive(Debug)]
11072pub enum BufferCollectionTokenGroupRequest {
11073    /// Ensure that previous messages have been received server side. This is
11074    /// particularly useful after previous messages that created new tokens,
11075    /// because a token must be known to the sysmem server before sending the
11076    /// token to another participant.
11077    ///
11078    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
11079    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
11080    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
11081    /// to mitigate the possibility of a hostile/fake
11082    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
11083    /// Another way is to pass the token to
11084    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
11085    /// the token as part of exchanging it for a
11086    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
11087    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
11088    /// of stalling.
11089    ///
11090    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
11091    /// and then starting and completing a `Sync`, it's then safe to send the
11092    /// `BufferCollectionToken` client ends to other participants knowing the
11093    /// server will recognize the tokens when they're sent by the other
11094    /// participants to sysmem in a
11095    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
11096    /// efficient way to create tokens while avoiding unnecessary round trips.
11097    ///
11098    /// Other options include waiting for each
11099    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
11100    /// individually (using a separate call to `Sync` after each), or calling
11101    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
11102    /// converted to a `BufferCollection` via
11103    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
11104    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
11105    /// the sync step and can create multiple tokens at once.
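    ///
    /// As a hedged sketch of the "duplicate, then one `Sync`" pattern: assume
    /// an async proxy `token: BufferCollectionTokenProxy`, and that the
    /// duplicate request table has the `rights_attenuation_mask` and
    /// `token_request` fields described in the `Duplicate` documentation;
    /// `send_to_other_participant` is a placeholder for whatever transport the
    /// client uses:
    ///
    /// ```ignore
    /// let (peer_token, peer_token_server) =
    ///     fidl::endpoints::create_endpoints::<BufferCollectionTokenMarker>();
    /// token.duplicate(BufferCollectionTokenDuplicateRequest {
    ///     rights_attenuation_mask: Some(fidl::Rights::SAME_RIGHTS),
    ///     token_request: Some(peer_token_server),
    ///     ..Default::default()
    /// })?;
    /// // One completed round trip makes the new token known to the sysmem
    /// // server before its client end is sent elsewhere.
    /// token.sync().await?;
    /// send_to_other_participant(peer_token);
    /// ```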
11106    Sync { responder: BufferCollectionTokenGroupSyncResponder },
11107    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
11108    ///
11109    /// Normally a participant will convert a `BufferCollectionToken` into a
11110    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
11111    /// `Release` via the token (and then close the channel immediately or
11112    /// shortly later in response to server closing the server end), which
11113    /// avoids causing buffer collection failure. Without a prior `Release`,
11114    /// closing the `BufferCollectionToken` client end will cause buffer
11115    /// collection failure.
11116    ///
11117    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
11118    ///
11119    /// By default the server handles unexpected closure of a
11120    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
11121    /// first) by failing the buffer collection. Partly this is to expedite
11122    /// closing VMO handles to reclaim memory when any participant fails. If a
11123    /// participant would like to cleanly close a `BufferCollection` without
11124    /// causing buffer collection failure, the participant can send `Release`
11125    /// before closing the `BufferCollection` client end. The `Release` can
11126    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
11127    /// buffer collection won't require constraints from this node in order to
11128    /// allocate. If after `SetConstraints`, the constraints are retained and
11129    /// aggregated, despite the lack of `BufferCollection` connection at the
11130    /// time of constraints aggregation.
11131    ///
11132    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
11133    ///
11134    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
11135    /// end (without `Release` first) will trigger failure of the buffer
11136    /// collection. To close a `BufferCollectionTokenGroup` channel without
11137    /// failing the buffer collection, ensure that AllChildrenPresent() has been
11138    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
11139    /// client end.
11140    ///
11141    /// If `Release` occurs before
11142    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
11143    /// buffer collection will fail (triggered by reception of `Release` without
11144    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
11145    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
11146    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
11147    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
11148    /// close requires `AllChildrenPresent` (if not already sent), then
11149    /// `Release`, then close client end.
11150    ///
11151    /// If `Release` occurs after `AllChildrenPresent`, the children and all
11152    /// their constraints remain intact (just as they would if the
11153    /// `BufferCollectionTokenGroup` channel had remained open), and the client
11154    /// end close doesn't trigger buffer collection failure.
11155    ///
11156    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
11157    ///
11158    /// For brevity, the per-channel-protocol paragraphs above ignore the
11159    /// separate failure domain created by
11160    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11161    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
11162    /// unexpectedly closes (without `Release` first) and that client end is
11163    /// under a failure domain, instead of failing the whole buffer collection,
11164    /// the failure domain is failed, but the buffer collection itself is
11165    /// isolated from failure of the failure domain. Such failure domains can be
11166    /// nested, in which case only the inner-most failure domain in which the
11167    /// `Node` resides fails.
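    ///
    /// A hedged sketch of the clean-close sequence for a group, assuming an
    /// async proxy `group: BufferCollectionTokenGroupProxy`:
    ///
    /// ```ignore
    /// group.all_children_present()?; // if not already sent
    /// group.release()?;
    /// drop(group); // closing without the prior Release would fail the subtree
    /// ```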
11168    Release { control_handle: BufferCollectionTokenGroupControlHandle },
11169    /// Set a name for VMOs in this buffer collection.
11170    ///
11171    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
11172    /// will be truncated to fit. The name of the vmo will be suffixed with the
11173    /// buffer index within the collection (if the suffix fits within
11174    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
11175    /// listed in the inspect data.
11176    ///
11177    /// The name only affects VMOs allocated after the name is set; this call
11178    /// does not rename existing VMOs. If multiple clients set different names
11179    /// then the larger priority value will win. Setting a new name with the
11180    /// same priority as a prior name doesn't change the name.
11181    ///
11182    /// All table fields are currently required.
11183    ///
11184    /// + request `priority` The name is only set if this is the first `SetName`
11185    ///   or if `priority` is greater than any previous `priority` value in
11186    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
11187    /// + request `name` The name for VMOs created under this buffer collection.
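    ///
    /// A hedged sketch, assuming an async proxy
    /// `group: BufferCollectionTokenGroupProxy` and that the table fields
    /// mirror the request parameters documented above:
    ///
    /// ```ignore
    /// group.set_name(NodeSetNameRequest {
    ///     priority: Some(100),
    ///     name: Some("my-participant".to_string()),
    ///     ..Default::default()
    /// })?;
    /// ```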
11188    SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenGroupControlHandle },
11189    /// Set information about the current client that can be used by sysmem to
11190    /// help diagnose leaking memory and allocation stalls waiting for a
11191    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
11192    ///
11193    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
11194    /// `Node`(s) derived from this `Node`, unless overridden by
11195    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
11196    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
11197    ///
11198    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
11199    /// `Allocator` is the most efficient way to ensure that all
11200    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
11201    /// set, and is also more efficient than separately sending the same debug
11202    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
11203    /// created [`fuchsia.sysmem2/Node`].
11204    ///
11205    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
11206    /// indicate which client is closing their channel first, leading to subtree
11207    /// failure (which can be normal if the purpose of the subtree is over, but
11208    /// if happening earlier than expected, the client-channel-specific name can
11209    /// help diagnose where the failure is first coming from, from sysmem's
11210    /// point of view).
11211    ///
11212    /// All table fields are currently required.
11213    ///
11214    /// + request `name` This can be an arbitrary string, but the current
11215    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
11216    /// + request `id` This can be an arbitrary id, but the current process ID
11217    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
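    ///
    /// A hedged sketch, assuming an async proxy
    /// `group: BufferCollectionTokenGroupProxy`; `current_process_koid` is a
    /// placeholder for however the client obtains its process koid:
    ///
    /// ```ignore
    /// group.set_debug_client_info(NodeSetDebugClientInfoRequest {
    ///     name: Some("my_component".to_string()),
    ///     id: Some(current_process_koid),
    ///     ..Default::default()
    /// })?;
    /// ```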
11218    SetDebugClientInfo {
11219        payload: NodeSetDebugClientInfoRequest,
11220        control_handle: BufferCollectionTokenGroupControlHandle,
11221    },
11222    /// Sysmem logs a warning if sysmem hasn't seen
11223    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
11224    /// within 5 seconds after creation of a new collection.
11225    ///
11226    /// Clients can call this method to change when the log is printed. If
11227    /// multiple clients set the deadline, it's unspecified which deadline will
11228    /// take effect.
11229    ///
11230    /// In most cases the default works well.
11231    ///
11232    /// All table fields are currently required.
11233    ///
11234    /// + request `deadline` The time at which sysmem will start trying to log
11235    ///   the warning, unless all constraints are with sysmem by then.
11236    SetDebugTimeoutLogDeadline {
11237        payload: NodeSetDebugTimeoutLogDeadlineRequest,
11238        control_handle: BufferCollectionTokenGroupControlHandle,
11239    },
11240    /// This enables verbose logging for the buffer collection.
11241    ///
11242    /// Verbose logging includes constraints set via
11243    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
11244    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
11245    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
11246    /// the tree of `Node`(s).
11247    ///
11248    /// Normally sysmem prints only a single line complaint when aggregation
11249    /// fails, with just the specific detailed reason that aggregation failed,
11250    /// with little surrounding context.  While this is often enough to diagnose
11251    /// a problem if only a small change was made and everything was working
11252    /// before the small change, it's often not particularly helpful for getting
11253    /// a new buffer collection to work for the first time.  Especially with
11254    /// more complex trees of nodes, involving things like
11255    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
11256    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
11257    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
11258    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
11259    /// looks like and why it's failing a logical allocation, or why a tree or
11260    /// subtree is failing sooner than expected.
11261    ///
11262    /// The intent of the extra logging is to be acceptable from a performance
11263    /// point of view, under the assumption that verbose logging is only enabled
11264    /// on a low number of buffer collections. If we're not tracking down a bug,
11265    /// we shouldn't send this message.
11266    SetVerboseLogging { control_handle: BufferCollectionTokenGroupControlHandle },
11267    /// This gets a handle that can be used as a parameter to
11268    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
11269    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
11270    /// client obtained this handle from this `Node`.
11271    ///
11272    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
11273    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
11274    /// despite the two calls typically being on different channels.
11275    ///
11276    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
11277    ///
11278    /// All table fields are currently required.
11279    ///
11280    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
11281    ///   different `Node` channel, to prove that the client obtained the handle
11282    ///   from this `Node`.
11283    GetNodeRef { responder: BufferCollectionTokenGroupGetNodeRefResponder },
11284    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
11285    /// rooted at a different child token of a common parent
11286    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
11287    /// passed-in `node_ref`.
11288    ///
11289    /// This call is for assisting with admission control de-duplication, and
11290    /// with debugging.
11291    ///
11292    /// The `node_ref` must be obtained using
11293    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
11294    ///
11295    /// The `node_ref` can be a duplicated handle; it's not necessary to call
11296    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
11297    ///
11298    /// If a calling token may not actually be a valid token at all due to a
11299    /// potentially hostile/untrusted provider of the token, call
11300    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
11301    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
11302    /// never responds due to a calling token not being a real token (not really
11303    /// talking to sysmem).  Another option is to call
11304    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
11305    /// which also validates the token along with converting it to a
11306    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
11307    ///
11308    /// All table fields are currently required.
11309    ///
11310    /// - response `is_alternate`
11311    ///   - true: The first parent node in common between the calling node and
11312    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
11313    ///     that the calling `Node` and the `node_ref` `Node` will not have both
11314    ///     their constraints apply - rather sysmem will choose one or the other
11315    ///     of the constraints - never both.  This is because only one child of
11316    ///     a `BufferCollectionTokenGroup` is selected during logical
11317    ///     allocation, with only that one child's subtree contributing to
11318    ///     constraints aggregation.
11319    ///   - false: The first parent node in common between the calling `Node`
11320    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
11321    ///     Currently, this means the first parent node in common is a
11322    ///     `BufferCollectionToken` or `BufferCollection` (regardless of whether
11323    ///     it has been `Release`ed).  This means that the calling `Node` and the `node_ref`
11324    ///     `Node` may have both their constraints apply during constraints
11325    ///     aggregation of the logical allocation, if both `Node`(s) are
11326    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
11327    ///     this case, there is no `BufferCollectionTokenGroup` that will
11328    ///     directly prevent the two `Node`(s) from both being selected and
11329    ///     their constraints both aggregated, but even when false, one or both
11330    ///     `Node`(s) may still be eliminated from consideration if one or both
11331    ///     `Node`(s) has a direct or indirect parent
11332    ///     `BufferCollectionTokenGroup` which selects a child subtree other
11333    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
11334    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The `node_ref` wasn't
11335    ///   associated with the same buffer collection as the calling `Node`.
11336    ///   Another reason for this error is if the `node_ref` is an
11337    ///   [`zx.Handle:EVENT`] handle with sufficient rights, but isn't actually
11338    ///   a real `node_ref` obtained from `GetNodeRef`.
11339    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
11340    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
11341    ///   the needed rights expected on a real `node_ref`.
11342    /// * No other failing status codes are returned by this call.  However,
11343    ///   sysmem may add additional codes in future, so the client should have
11344    ///   sensible default handling for any failing status code.
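    ///
    /// A hedged sketch combining `GetNodeRef` and `IsAlternateFor`, assuming
    /// two async `Node` proxies `node_a` and `node_b` on the same buffer
    /// collection, and that the response tables carry the `node_ref` and
    /// `is_alternate` fields documented above:
    ///
    /// ```ignore
    /// let node_ref = node_a.get_node_ref().await?.node_ref.unwrap();
    /// let response = node_b
    ///     .is_alternate_for(NodeIsAlternateForRequest {
    ///         node_ref: Some(node_ref),
    ///         ..Default::default()
    ///     })
    ///     .await??;
    /// if response.is_alternate.unwrap_or(false) {
    ///     // At most one of node_a / node_b will contribute constraints.
    /// }
    /// ```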
11345    IsAlternateFor {
11346        payload: NodeIsAlternateForRequest,
11347        responder: BufferCollectionTokenGroupIsAlternateForResponder,
11348    },
11349    /// Get the buffer collection ID. This ID is also available from
11350    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
11351    /// within the collection).
11352    ///
11353    /// This call is mainly useful in situations where we can't convey a
11354    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
11355    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
11356    /// handle, which can be joined back up with a `BufferCollection` client end
11357    /// that was created via a different path. Prefer to convey a
11358    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
11359    ///
11360    /// Trusting a `buffer_collection_id` value from a source other than sysmem
11361    /// is analogous to trusting a koid value from a source other than zircon.
11362    /// Both should be avoided unless really necessary, and both require
11363    /// caution. In some situations it may be reasonable to refer to a
11364    /// pre-established `BufferCollection` by `buffer_collection_id` via a
11365    /// protocol for efficiency reasons, but an incoming value purporting to be
11366    /// a `buffer_collection_id` is not sufficient alone to justify granting the
11367    /// sender of the `buffer_collection_id` any capability. The sender must
11368    /// first prove to a receiver that the sender has/had a VMO or has/had a
11369    /// `BufferCollectionToken` to the same collection by sending a handle that
11370    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
11371    /// `buffer_collection_id` value. The receiver should take care to avoid
11372    /// assuming that a sender had a `BufferCollectionToken` in cases where the
11373    /// sender has only proven that the sender had a VMO.
11374    ///
11375    /// - response `buffer_collection_id` This ID is unique per buffer
11376    ///   collection per boot. Each buffer is uniquely identified by the
11377    ///   `buffer_collection_id` and `buffer_index` together.
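    ///
    /// A hedged sketch, assuming an async proxy
    /// `group: BufferCollectionTokenGroupProxy`:
    ///
    /// ```ignore
    /// let id = group
    ///     .get_buffer_collection_id()
    ///     .await?
    ///     .buffer_collection_id
    ///     .unwrap();
    /// ```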
11378    GetBufferCollectionId { responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder },
11379    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
11380    /// created after this message to weak, which means that a client's `Node`
11381    /// client end (or a child created after this message) is not alone
11382    /// sufficient to keep allocated VMOs alive.
11383    ///
11384    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
11385    /// `close_weak_asap`.
11386    ///
11387    /// This message is only permitted before the `Node` becomes ready for
11388    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
11389    ///   * `BufferCollectionToken`: any time
11390    ///   * `BufferCollection`: before `SetConstraints`
11391    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
11392    ///
11393    /// Currently, no conversion from strong `Node` to weak `Node` after ready
11394    /// for allocation is provided, but a client can simulate that by creating
11395    /// an additional `Node` before allocation and setting that additional
11396    /// `Node` to weak, and then potentially at some point later sending
11397    /// `Release` and closing the client end of the client's strong `Node`, but
11398    /// keeping the client's weak `Node`.
11399    ///
11400    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
11401    /// collection failure (all `Node` client end(s) will see
11402    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
11403    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
11404    /// this situation until all `Node`(s) are ready for allocation. For initial
11405    /// allocation to succeed, at least one strong `Node` is required to exist
11406    /// at allocation time, but after that client receives VMO handles, that
11407    /// client can `BufferCollection.Release` and close the client end without
11408    /// causing this type of failure.
11409    ///
11410    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
11411    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
11412    /// separately as appropriate.
11413    SetWeak { control_handle: BufferCollectionTokenGroupControlHandle },
11414    /// This indicates to sysmem that the client is prepared to pay attention to
11415    /// `close_weak_asap`.
11416    ///
11417    /// If sent, this message must be before
11418    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
11419    ///
11420    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
11421    /// send this message before `WaitForAllBuffersAllocated`, or a parent
11422    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
11423    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
11424    /// trigger buffer collection failure.
11425    ///
11426    /// This message is necessary because weak sysmem VMOs have not always been
11427    /// a thing, so older clients are not aware of the need to pay attention to
11428    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
11429    /// sysmem weak VMO handles asap. By having this message and requiring
11430    /// participants to indicate their acceptance of this aspect of the overall
11431    /// protocol, we avoid situations where an older client is delivered a weak
11432    /// VMO without any way for sysmem to get that VMO to close quickly later
11433    /// (and on a per-buffer basis).
11434    ///
11435    /// A participant that doesn't handle `close_weak_asap` and also doesn't
11436    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
11437    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
11438    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
11439    /// same participant has a child/delegate which does retrieve VMOs, that
11440    /// child/delegate will need to send `SetWeakOk` before
11441    /// `WaitForAllBuffersAllocated`.
11442    ///
11443    /// + request `for_child_nodes_also` If present and true, this means direct
11444    ///   child nodes of this node created after this message plus all
11445    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
11446    ///   those nodes. Any child node of this node that was created before this
11447    ///   message is not included. This setting is "sticky" in the sense that a
11448    ///   subsequent `SetWeakOk` without this bool set to true does not reset
11449    ///   the server-side bool. If this creates a problem for a participant, a
11450    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
11451    ///   tokens instead, as appropriate. A participant should only set
11452    ///   `for_child_nodes_also` true if the participant can really promise to
11453    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
11454    ///   weak VMO handles held by participants holding the corresponding child
11455    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
11456    ///   which are using sysmem(1) can be weak, despite the clients of those
11457    ///   sysmem(1) `Node`(s) not having any direct way to `SetWeakOk` or any
11458    ///   direct way to find out about `close_weak_asap`. This only applies to
11459    ///   descendants of this `Node` which are using sysmem(1), not to this
11460    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
11461    ///   token, which will fail allocation unless an ancestor of this `Node`
11462    ///   specified `for_child_nodes_also` true.
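    ///
    /// A hedged sketch, assuming an async proxy
    /// `group: BufferCollectionTokenGroupProxy`:
    ///
    /// ```ignore
    /// // Opt in to close_weak_asap handling for this node and for child
    /// // nodes created after this message.
    /// group.set_weak_ok(NodeSetWeakOkRequest {
    ///     for_child_nodes_also: Some(true),
    ///     ..Default::default()
    /// })?;
    /// ```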
11463    SetWeakOk {
11464        payload: NodeSetWeakOkRequest,
11465        control_handle: BufferCollectionTokenGroupControlHandle,
11466    },
11467    /// The `server_end` will be closed after this `Node` and any child nodes
11468    /// have released their buffer counts, making those counts available for
11469    /// reservation by a different `Node` via
11470    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
11471    ///
11472    /// The `Node` buffer counts may not be released until the entire tree of
11473    /// `Node`(s) is closed or failed, because
11474    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
11475    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
11476    /// `Node` buffer counts remain reserved until the orphaned node is later
11477    /// cleaned up.
11478    ///
11479    /// If the `Node` exceeds a fairly large number of attached eventpair server
11480    /// ends, a log message will indicate this and the `Node` (and the
11481    /// appropriate sub-tree) will fail.
11482    ///
11483    /// The `server_end` will remain open when
11484    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
11485    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
11486    /// [`fuchsia.sysmem2/BufferCollection`].
11487    ///
11488    /// This message can also be used with a
11489    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
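    ///
    /// A hedged sketch, assuming an async proxy
    /// `group: BufferCollectionTokenGroupProxy`; the exact eventpair
    /// constructor may differ by SDK version:
    ///
    /// ```ignore
    /// let (tracking_client, tracking_server) = fidl::EventPair::create();
    /// group.attach_node_tracking(NodeAttachNodeTrackingRequest {
    ///     server_end: Some(tracking_server),
    ///     ..Default::default()
    /// })?;
    /// // Later, ZX_EVENTPAIR_PEER_CLOSED on tracking_client indicates this
    /// // Node's buffer counts have been released.
    /// ```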
11490    AttachNodeTracking {
11491        payload: NodeAttachNodeTrackingRequest,
11492        control_handle: BufferCollectionTokenGroupControlHandle,
11493    },
11494    /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
11495    /// (including its children) will be selected during allocation (or logical
11496    /// allocation).
11497    ///
11498    /// Before passing the client end of this token to
11499    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
11500    /// [`fuchsia.sysmem2/Node.Sync`] after
11501    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
11502    /// Or the client can use
11503    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
11504    /// essentially includes the `Sync`.
11505    ///
11506    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11507    /// fail the group's subtree and close the connection.
11508    ///
11509    /// After all children have been created, send AllChildrenPresent.
11510    ///
11511    /// + request `token_request` The server end of the new token channel.
11512    /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
11513    ///   token allows the holder to get the same rights to buffers as the
11514    ///   parent token (of the group) had. When the value isn't
11515    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
11516    ///   bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
11517    ///   for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
11518    ///   causes subtree failure.
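    ///
    /// A hedged sketch, assuming an async proxy
    /// `group: BufferCollectionTokenGroupProxy`:
    ///
    /// ```ignore
    /// let (child_token, child_token_server) =
    ///     fidl::endpoints::create_endpoints::<BufferCollectionTokenMarker>();
    /// group.create_child(BufferCollectionTokenGroupCreateChildRequest {
    ///     token_request: Some(child_token_server),
    ///     // rights_attenuation_mask may also be set, per the docs above.
    ///     ..Default::default()
    /// })?;
    /// // Complete a Sync (or use CreateChildrenSync instead) before passing
    /// // child_token to Allocator.BindSharedCollection.
    /// group.sync().await?;
    /// ```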
11519    CreateChild {
11520        payload: BufferCollectionTokenGroupCreateChildRequest,
11521        control_handle: BufferCollectionTokenGroupControlHandle,
11522    },
11523    /// Create 1 or more child tokens at once, synchronously.  In contrast to
11524    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
11525    /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
11526    /// of a returned token to
11527    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
11528    ///
11529    /// The lower-index child tokens are higher priority (attempted sooner) than
11530    /// higher-index child tokens.
11531    ///
11532    /// As per all child tokens, successful aggregation will choose exactly one
11533    /// child among all created children (across all children created across
11534    /// potentially multiple calls to
11535    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
11536    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
11537    ///
11538    /// The maximum permissible total number of children per group, and total
11539    /// number of nodes in an overall tree (from the root) are capped to limits
11540    /// which are not configurable via these protocols.
11541    ///
11542    /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
11543    /// this will fail the group's subtree and close the connection.
11544    ///
11545    /// After all children have been created, send AllChildrenPresent.
11546    ///
11547    /// + request `rights_attenuation_masks` The size of the
11548    ///   `rights_attenuation_masks` determines the number of created child
11549    ///   tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
11550    ///   The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
11551    ///   other value, each 0 bit in the mask attenuates that right.
11552    /// - response `tokens` The created child tokens.
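    ///
    /// A minimal sketch (illustrative only, assuming the async
    /// `BufferCollectionTokenGroupProxy` generated elsewhere in this file,
    /// bound as `group`, and a `tokens` field on the response table):
    ///
    /// ```rust,ignore
    /// let response = group
    ///     .create_children_sync(BufferCollectionTokenGroupCreateChildrenSyncRequest {
    ///         // One child per mask entry; no attenuation here.
    ///         rights_attenuation_masks: Some(vec![fidl::Rights::SAME_RIGHTS; 2]),
    ///         ..Default::default()
    ///     })
    ///     .await?;
    /// // The child token client ends, highest priority (lowest index) first.
    /// let children = response.tokens.unwrap_or_default();
    /// ```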
11553    CreateChildrenSync {
11554        payload: BufferCollectionTokenGroupCreateChildrenSyncRequest,
11555        responder: BufferCollectionTokenGroupCreateChildrenSyncResponder,
11556    },
11557    /// Indicate that no more children will be created.
11558    ///
11559    /// After creating all children, the client should send
11560    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
11561    /// inform sysmem that no more children will be created, so that sysmem can
11562    /// know when it's ok to start aggregating constraints.
11563    ///
11564    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11565    /// fail the group's subtree and close the connection.
11566    ///
11567    /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
11568    /// after `AllChildrenPresent`, else failure of the group's subtree will be
11569    /// triggered. This is intentionally not analogous to how `Release` without
11570    /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
11571    /// subtree failure.
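    ///
    /// Illustrative ordering for a clean close of a group (sketch only,
    /// assuming an async proxy bound as `group`):
    ///
    /// ```rust,ignore
    /// // 1. Create all children (CreateChild / CreateChildrenSync).
    /// // 2. Tell sysmem the set of children is complete.
    /// group.all_children_present()?;
    /// // 3. Optionally close cleanly without failing the subtree.
    /// group.release()?;
    /// drop(group);
    /// ```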
11572    AllChildrenPresent { control_handle: BufferCollectionTokenGroupControlHandle },
11573    /// An interaction was received which does not match any known method.
11574    #[non_exhaustive]
11575    _UnknownMethod {
11576        /// Ordinal of the method that was called.
11577        ordinal: u64,
11578        control_handle: BufferCollectionTokenGroupControlHandle,
11579        method_type: fidl::MethodType,
11580    },
11581}
11582
11583impl BufferCollectionTokenGroupRequest {
11584    #[allow(irrefutable_let_patterns)]
11585    pub fn into_sync(self) -> Option<(BufferCollectionTokenGroupSyncResponder)> {
11586        if let BufferCollectionTokenGroupRequest::Sync { responder } = self {
11587            Some((responder))
11588        } else {
11589            None
11590        }
11591    }
11592
11593    #[allow(irrefutable_let_patterns)]
11594    pub fn into_release(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11595        if let BufferCollectionTokenGroupRequest::Release { control_handle } = self {
11596            Some((control_handle))
11597        } else {
11598            None
11599        }
11600    }
11601
11602    #[allow(irrefutable_let_patterns)]
11603    pub fn into_set_name(
11604        self,
11605    ) -> Option<(NodeSetNameRequest, BufferCollectionTokenGroupControlHandle)> {
11606        if let BufferCollectionTokenGroupRequest::SetName { payload, control_handle } = self {
11607            Some((payload, control_handle))
11608        } else {
11609            None
11610        }
11611    }
11612
11613    #[allow(irrefutable_let_patterns)]
11614    pub fn into_set_debug_client_info(
11615        self,
11616    ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenGroupControlHandle)> {
11617        if let BufferCollectionTokenGroupRequest::SetDebugClientInfo { payload, control_handle } =
11618            self
11619        {
11620            Some((payload, control_handle))
11621        } else {
11622            None
11623        }
11624    }
11625
11626    #[allow(irrefutable_let_patterns)]
11627    pub fn into_set_debug_timeout_log_deadline(
11628        self,
11629    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenGroupControlHandle)>
11630    {
11631        if let BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {
11632            payload,
11633            control_handle,
11634        } = self
11635        {
11636            Some((payload, control_handle))
11637        } else {
11638            None
11639        }
11640    }
11641
11642    #[allow(irrefutable_let_patterns)]
11643    pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11644        if let BufferCollectionTokenGroupRequest::SetVerboseLogging { control_handle } = self {
11645            Some((control_handle))
11646        } else {
11647            None
11648        }
11649    }
11650
11651    #[allow(irrefutable_let_patterns)]
11652    pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGroupGetNodeRefResponder)> {
11653        if let BufferCollectionTokenGroupRequest::GetNodeRef { responder } = self {
11654            Some((responder))
11655        } else {
11656            None
11657        }
11658    }
11659
11660    #[allow(irrefutable_let_patterns)]
11661    pub fn into_is_alternate_for(
11662        self,
11663    ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenGroupIsAlternateForResponder)>
11664    {
11665        if let BufferCollectionTokenGroupRequest::IsAlternateFor { payload, responder } = self {
11666            Some((payload, responder))
11667        } else {
11668            None
11669        }
11670    }
11671
11672    #[allow(irrefutable_let_patterns)]
11673    pub fn into_get_buffer_collection_id(
11674        self,
11675    ) -> Option<(BufferCollectionTokenGroupGetBufferCollectionIdResponder)> {
11676        if let BufferCollectionTokenGroupRequest::GetBufferCollectionId { responder } = self {
11677            Some((responder))
11678        } else {
11679            None
11680        }
11681    }
11682
11683    #[allow(irrefutable_let_patterns)]
11684    pub fn into_set_weak(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11685        if let BufferCollectionTokenGroupRequest::SetWeak { control_handle } = self {
11686            Some((control_handle))
11687        } else {
11688            None
11689        }
11690    }
11691
11692    #[allow(irrefutable_let_patterns)]
11693    pub fn into_set_weak_ok(
11694        self,
11695    ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenGroupControlHandle)> {
11696        if let BufferCollectionTokenGroupRequest::SetWeakOk { payload, control_handle } = self {
11697            Some((payload, control_handle))
11698        } else {
11699            None
11700        }
11701    }
11702
11703    #[allow(irrefutable_let_patterns)]
11704    pub fn into_attach_node_tracking(
11705        self,
11706    ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenGroupControlHandle)> {
11707        if let BufferCollectionTokenGroupRequest::AttachNodeTracking { payload, control_handle } =
11708            self
11709        {
11710            Some((payload, control_handle))
11711        } else {
11712            None
11713        }
11714    }
11715
11716    #[allow(irrefutable_let_patterns)]
11717    pub fn into_create_child(
11718        self,
11719    ) -> Option<(
11720        BufferCollectionTokenGroupCreateChildRequest,
11721        BufferCollectionTokenGroupControlHandle,
11722    )> {
11723        if let BufferCollectionTokenGroupRequest::CreateChild { payload, control_handle } = self {
11724            Some((payload, control_handle))
11725        } else {
11726            None
11727        }
11728    }
11729
11730    #[allow(irrefutable_let_patterns)]
11731    pub fn into_create_children_sync(
11732        self,
11733    ) -> Option<(
11734        BufferCollectionTokenGroupCreateChildrenSyncRequest,
11735        BufferCollectionTokenGroupCreateChildrenSyncResponder,
11736    )> {
11737        if let BufferCollectionTokenGroupRequest::CreateChildrenSync { payload, responder } = self {
11738            Some((payload, responder))
11739        } else {
11740            None
11741        }
11742    }
11743
11744    #[allow(irrefutable_let_patterns)]
11745    pub fn into_all_children_present(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11746        if let BufferCollectionTokenGroupRequest::AllChildrenPresent { control_handle } = self {
11747            Some((control_handle))
11748        } else {
11749            None
11750        }
11751    }
11752
11753    /// Name of the method defined in FIDL
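    ///
    /// Sketch of how a server task might use this together with the request
    /// variants above (illustrative only; assumes a `stream` of the
    /// `BufferCollectionTokenGroupRequestStream` type generated elsewhere in
    /// this file):
    ///
    /// ```rust,ignore
    /// use futures::TryStreamExt;
    /// while let Some(request) = stream.try_next().await? {
    ///     match request {
    ///         BufferCollectionTokenGroupRequest::AllChildrenPresent { .. } => {
    ///             // handle AllChildrenPresent...
    ///         }
    ///         other => eprintln!("unhandled request: {}", other.method_name()),
    ///     }
    /// }
    /// ```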
11754    pub fn method_name(&self) -> &'static str {
11755        match *self {
11756            BufferCollectionTokenGroupRequest::Sync { .. } => "sync",
11757            BufferCollectionTokenGroupRequest::Release { .. } => "release",
11758            BufferCollectionTokenGroupRequest::SetName { .. } => "set_name",
11759            BufferCollectionTokenGroupRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
11760            BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline { .. } => {
11761                "set_debug_timeout_log_deadline"
11762            }
11763            BufferCollectionTokenGroupRequest::SetVerboseLogging { .. } => "set_verbose_logging",
11764            BufferCollectionTokenGroupRequest::GetNodeRef { .. } => "get_node_ref",
11765            BufferCollectionTokenGroupRequest::IsAlternateFor { .. } => "is_alternate_for",
11766            BufferCollectionTokenGroupRequest::GetBufferCollectionId { .. } => {
11767                "get_buffer_collection_id"
11768            }
11769            BufferCollectionTokenGroupRequest::SetWeak { .. } => "set_weak",
11770            BufferCollectionTokenGroupRequest::SetWeakOk { .. } => "set_weak_ok",
11771            BufferCollectionTokenGroupRequest::AttachNodeTracking { .. } => "attach_node_tracking",
11772            BufferCollectionTokenGroupRequest::CreateChild { .. } => "create_child",
11773            BufferCollectionTokenGroupRequest::CreateChildrenSync { .. } => "create_children_sync",
11774            BufferCollectionTokenGroupRequest::AllChildrenPresent { .. } => "all_children_present",
11775            BufferCollectionTokenGroupRequest::_UnknownMethod {
11776                method_type: fidl::MethodType::OneWay,
11777                ..
11778            } => "unknown one-way method",
11779            BufferCollectionTokenGroupRequest::_UnknownMethod {
11780                method_type: fidl::MethodType::TwoWay,
11781                ..
11782            } => "unknown two-way method",
11783        }
11784    }
11785}
11786
11787#[derive(Debug, Clone)]
11788pub struct BufferCollectionTokenGroupControlHandle {
11789    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
11790}
11791
11792impl fidl::endpoints::ControlHandle for BufferCollectionTokenGroupControlHandle {
11793    fn shutdown(&self) {
11794        self.inner.shutdown()
11795    }
11796    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
11797        self.inner.shutdown_with_epitaph(status)
11798    }
11799
11800    fn is_closed(&self) -> bool {
11801        self.inner.channel().is_closed()
11802    }
11803    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
11804        self.inner.channel().on_closed()
11805    }
11806
11807    #[cfg(target_os = "fuchsia")]
11808    fn signal_peer(
11809        &self,
11810        clear_mask: zx::Signals,
11811        set_mask: zx::Signals,
11812    ) -> Result<(), zx_status::Status> {
11813        use fidl::Peered;
11814        self.inner.channel().signal_peer(clear_mask, set_mask)
11815    }
11816}
11817
11818impl BufferCollectionTokenGroupControlHandle {}
11819
11820#[must_use = "FIDL methods require a response to be sent"]
11821#[derive(Debug)]
11822pub struct BufferCollectionTokenGroupSyncResponder {
11823    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
11824    tx_id: u32,
11825}
11826
11827/// Set the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
11828/// if the responder is dropped without sending a response, so that the client
11829/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
11830impl std::ops::Drop for BufferCollectionTokenGroupSyncResponder {
11831    fn drop(&mut self) {
11832        self.control_handle.shutdown();
11833        // Safety: drops once, never accessed again
11834        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
11835    }
11836}
11837
11838impl fidl::endpoints::Responder for BufferCollectionTokenGroupSyncResponder {
11839    type ControlHandle = BufferCollectionTokenGroupControlHandle;
11840
11841    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
11842        &self.control_handle
11843    }
11844
11845    fn drop_without_shutdown(mut self) {
11846        // Safety: drops once, never accessed again due to mem::forget
11847        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
11848        // Prevent Drop from running (which would shut down the channel)
11849        std::mem::forget(self);
11850    }
11851}
11852
11853impl BufferCollectionTokenGroupSyncResponder {
11854    /// Sends a response to the FIDL transaction.
11855    ///
11856    /// Sets the channel to shutdown if an error occurs.
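    ///
    /// Illustrative use inside a server handler (sketch only):
    ///
    /// ```rust,ignore
    /// if let Some(responder) = request.into_sync() {
    ///     // Reply so the client's Sync call completes; dropping the
    ///     // responder without replying would shut down the channel instead.
    ///     responder.send()?;
    /// }
    /// ```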
11857    pub fn send(self) -> Result<(), fidl::Error> {
11858        let _result = self.send_raw();
11859        if _result.is_err() {
11860            self.control_handle.shutdown();
11861        }
11862        self.drop_without_shutdown();
11863        _result
11864    }
11865
11866    /// Similar to "send" but does not shutdown the channel if an error occurs.
11867    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
11868        let _result = self.send_raw();
11869        self.drop_without_shutdown();
11870        _result
11871    }
11872
11873    fn send_raw(&self) -> Result<(), fidl::Error> {
11874        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
11875            fidl::encoding::Flexible::new(()),
11876            self.tx_id,
11877            0x11ac2555cf575b54,
11878            fidl::encoding::DynamicFlags::FLEXIBLE,
11879        )
11880    }
11881}
11882
11883#[must_use = "FIDL methods require a response to be sent"]
11884#[derive(Debug)]
11885pub struct BufferCollectionTokenGroupGetNodeRefResponder {
11886    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
11887    tx_id: u32,
11888}
11889
11890/// Set the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
11891/// if the responder is dropped without sending a response, so that the client
11892/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
11893impl std::ops::Drop for BufferCollectionTokenGroupGetNodeRefResponder {
11894    fn drop(&mut self) {
11895        self.control_handle.shutdown();
11896        // Safety: drops once, never accessed again
11897        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
11898    }
11899}
11900
11901impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetNodeRefResponder {
11902    type ControlHandle = BufferCollectionTokenGroupControlHandle;
11903
11904    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
11905        &self.control_handle
11906    }
11907
11908    fn drop_without_shutdown(mut self) {
11909        // Safety: drops once, never accessed again due to mem::forget
11910        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
11911        // Prevent Drop from running (which would shut down the channel)
11912        std::mem::forget(self);
11913    }
11914}
11915
11916impl BufferCollectionTokenGroupGetNodeRefResponder {
11917    /// Sends a response to the FIDL transaction.
11918    ///
11919    /// Sets the channel to shutdown if an error occurs.
11920    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
11921        let _result = self.send_raw(payload);
11922        if _result.is_err() {
11923            self.control_handle.shutdown();
11924        }
11925        self.drop_without_shutdown();
11926        _result
11927    }
11928
11929    /// Similar to "send" but does not shutdown the channel if an error occurs.
11930    pub fn send_no_shutdown_on_err(
11931        self,
11932        mut payload: NodeGetNodeRefResponse,
11933    ) -> Result<(), fidl::Error> {
11934        let _result = self.send_raw(payload);
11935        self.drop_without_shutdown();
11936        _result
11937    }
11938
11939    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
11940        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
11941            fidl::encoding::Flexible::new(&mut payload),
11942            self.tx_id,
11943            0x5b3d0e51614df053,
11944            fidl::encoding::DynamicFlags::FLEXIBLE,
11945        )
11946    }
11947}
11948
11949#[must_use = "FIDL methods require a response to be sent"]
11950#[derive(Debug)]
11951pub struct BufferCollectionTokenGroupIsAlternateForResponder {
11952    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
11953    tx_id: u32,
11954}
11955
11956/// Set the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
11957/// if the responder is dropped without sending a response, so that the client
11958/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
11959impl std::ops::Drop for BufferCollectionTokenGroupIsAlternateForResponder {
11960    fn drop(&mut self) {
11961        self.control_handle.shutdown();
11962        // Safety: drops once, never accessed again
11963        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
11964    }
11965}
11966
11967impl fidl::endpoints::Responder for BufferCollectionTokenGroupIsAlternateForResponder {
11968    type ControlHandle = BufferCollectionTokenGroupControlHandle;
11969
11970    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
11971        &self.control_handle
11972    }
11973
11974    fn drop_without_shutdown(mut self) {
11975        // Safety: drops once, never accessed again due to mem::forget
11976        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
11977        // Prevent Drop from running (which would shut down the channel)
11978        std::mem::forget(self);
11979    }
11980}
11981
11982impl BufferCollectionTokenGroupIsAlternateForResponder {
11983    /// Sends a response to the FIDL transaction.
11984    ///
11985    /// Sets the channel to shutdown if an error occurs.
11986    pub fn send(
11987        self,
11988        mut result: Result<&NodeIsAlternateForResponse, Error>,
11989    ) -> Result<(), fidl::Error> {
11990        let _result = self.send_raw(result);
11991        if _result.is_err() {
11992            self.control_handle.shutdown();
11993        }
11994        self.drop_without_shutdown();
11995        _result
11996    }
11997
11998    /// Similar to "send" but does not shutdown the channel if an error occurs.
11999    pub fn send_no_shutdown_on_err(
12000        self,
12001        mut result: Result<&NodeIsAlternateForResponse, Error>,
12002    ) -> Result<(), fidl::Error> {
12003        let _result = self.send_raw(result);
12004        self.drop_without_shutdown();
12005        _result
12006    }
12007
12008    fn send_raw(
12009        &self,
12010        mut result: Result<&NodeIsAlternateForResponse, Error>,
12011    ) -> Result<(), fidl::Error> {
12012        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
12013            NodeIsAlternateForResponse,
12014            Error,
12015        >>(
12016            fidl::encoding::FlexibleResult::new(result),
12017            self.tx_id,
12018            0x3a58e00157e0825,
12019            fidl::encoding::DynamicFlags::FLEXIBLE,
12020        )
12021    }
12022}
12023
12024#[must_use = "FIDL methods require a response to be sent"]
12025#[derive(Debug)]
12026pub struct BufferCollectionTokenGroupGetBufferCollectionIdResponder {
12027    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
12028    tx_id: u32,
12029}
12030
12031/// Set the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
12032/// if the responder is dropped without sending a response, so that the client
12033/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
12034impl std::ops::Drop for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
12035    fn drop(&mut self) {
12036        self.control_handle.shutdown();
12037        // Safety: drops once, never accessed again
12038        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
12039    }
12040}
12041
12042impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
12043    type ControlHandle = BufferCollectionTokenGroupControlHandle;
12044
12045    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
12046        &self.control_handle
12047    }
12048
12049    fn drop_without_shutdown(mut self) {
12050        // Safety: drops once, never accessed again due to mem::forget
12051        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
12052        // Prevent Drop from running (which would shut down the channel)
12053        std::mem::forget(self);
12054    }
12055}
12056
12057impl BufferCollectionTokenGroupGetBufferCollectionIdResponder {
12058    /// Sends a response to the FIDL transaction.
12059    ///
12060    /// Sets the channel to shutdown if an error occurs.
12061    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
12062        let _result = self.send_raw(payload);
12063        if _result.is_err() {
12064            self.control_handle.shutdown();
12065        }
12066        self.drop_without_shutdown();
12067        _result
12068    }
12069
12070    /// Similar to "send" but does not shutdown the channel if an error occurs.
12071    pub fn send_no_shutdown_on_err(
12072        self,
12073        mut payload: &NodeGetBufferCollectionIdResponse,
12074    ) -> Result<(), fidl::Error> {
12075        let _result = self.send_raw(payload);
12076        self.drop_without_shutdown();
12077        _result
12078    }
12079
12080    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
12081        self.control_handle
12082            .inner
12083            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
12084                fidl::encoding::Flexible::new(payload),
12085                self.tx_id,
12086                0x77d19a494b78ba8c,
12087                fidl::encoding::DynamicFlags::FLEXIBLE,
12088            )
12089    }
12090}
12091
12092#[must_use = "FIDL methods require a response to be sent"]
12093#[derive(Debug)]
12094pub struct BufferCollectionTokenGroupCreateChildrenSyncResponder {
12095    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
12096    tx_id: u32,
12097}
12098
12099/// Set the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
12100/// if the responder is dropped without sending a response, so that the client
12101/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
12102impl std::ops::Drop for BufferCollectionTokenGroupCreateChildrenSyncResponder {
12103    fn drop(&mut self) {
12104        self.control_handle.shutdown();
12105        // Safety: drops once, never accessed again
12106        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
12107    }
12108}
12109
12110impl fidl::endpoints::Responder for BufferCollectionTokenGroupCreateChildrenSyncResponder {
12111    type ControlHandle = BufferCollectionTokenGroupControlHandle;
12112
12113    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
12114        &self.control_handle
12115    }
12116
12117    fn drop_without_shutdown(mut self) {
12118        // Safety: drops once, never accessed again due to mem::forget
12119        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
12120        // Prevent Drop from running (which would shut down the channel)
12121        std::mem::forget(self);
12122    }
12123}
12124
12125impl BufferCollectionTokenGroupCreateChildrenSyncResponder {
12126    /// Sends a response to the FIDL transaction.
12127    ///
12128    /// Sets the channel to shutdown if an error occurs.
12129    pub fn send(
12130        self,
12131        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
12132    ) -> Result<(), fidl::Error> {
12133        let _result = self.send_raw(payload);
12134        if _result.is_err() {
12135            self.control_handle.shutdown();
12136        }
12137        self.drop_without_shutdown();
12138        _result
12139    }
12140
12141    /// Similar to "send" but does not shutdown the channel if an error occurs.
12142    pub fn send_no_shutdown_on_err(
12143        self,
12144        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
12145    ) -> Result<(), fidl::Error> {
12146        let _result = self.send_raw(payload);
12147        self.drop_without_shutdown();
12148        _result
12149    }
12150
12151    fn send_raw(
12152        &self,
12153        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
12154    ) -> Result<(), fidl::Error> {
12155        self.control_handle.inner.send::<fidl::encoding::FlexibleType<
12156            BufferCollectionTokenGroupCreateChildrenSyncResponse,
12157        >>(
12158            fidl::encoding::Flexible::new(&mut payload),
12159            self.tx_id,
12160            0x15dea448c536070a,
12161            fidl::encoding::DynamicFlags::FLEXIBLE,
12162        )
12163    }
12164}
12165
12166#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
12167pub struct NodeMarker;
12168
12169impl fidl::endpoints::ProtocolMarker for NodeMarker {
12170    type Proxy = NodeProxy;
12171    type RequestStream = NodeRequestStream;
12172    #[cfg(target_os = "fuchsia")]
12173    type SynchronousProxy = NodeSynchronousProxy;
12174
12175    const DEBUG_NAME: &'static str = "(anonymous) Node";
12176}
12177pub type NodeIsAlternateForResult = Result<NodeIsAlternateForResponse, Error>;
12178
12179pub trait NodeProxyInterface: Send + Sync {
12180    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
12181    fn r#sync(&self) -> Self::SyncResponseFut;
12182    fn r#release(&self) -> Result<(), fidl::Error>;
12183    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
12184    fn r#set_debug_client_info(
12185        &self,
12186        payload: &NodeSetDebugClientInfoRequest,
12187    ) -> Result<(), fidl::Error>;
12188    fn r#set_debug_timeout_log_deadline(
12189        &self,
12190        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
12191    ) -> Result<(), fidl::Error>;
12192    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
12193    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
12194        + Send;
12195    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
12196    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
12197        + Send;
12198    fn r#is_alternate_for(
12199        &self,
12200        payload: NodeIsAlternateForRequest,
12201    ) -> Self::IsAlternateForResponseFut;
12202    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
12203        + Send;
12204    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
12205    fn r#set_weak(&self) -> Result<(), fidl::Error>;
12206    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
12207    fn r#attach_node_tracking(
12208        &self,
12209        payload: NodeAttachNodeTrackingRequest,
12210    ) -> Result<(), fidl::Error>;
12211}
12212#[derive(Debug)]
12213#[cfg(target_os = "fuchsia")]
12214pub struct NodeSynchronousProxy {
12215    client: fidl::client::sync::Client,
12216}
12217
12218#[cfg(target_os = "fuchsia")]
12219impl fidl::endpoints::SynchronousProxy for NodeSynchronousProxy {
12220    type Proxy = NodeProxy;
12221    type Protocol = NodeMarker;
12222
12223    fn from_channel(inner: fidl::Channel) -> Self {
12224        Self::new(inner)
12225    }
12226
12227    fn into_channel(self) -> fidl::Channel {
12228        self.client.into_channel()
12229    }
12230
12231    fn as_channel(&self) -> &fidl::Channel {
12232        self.client.as_channel()
12233    }
12234}
12235
12236#[cfg(target_os = "fuchsia")]
12237impl NodeSynchronousProxy {
12238    pub fn new(channel: fidl::Channel) -> Self {
12239        let protocol_name = <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
12240        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
12241    }
12242
12243    pub fn into_channel(self) -> fidl::Channel {
12244        self.client.into_channel()
12245    }
12246
12247    /// Waits until an event arrives and returns it. It is safe for other
12248    /// threads to make concurrent requests while waiting for an event.
12249    pub fn wait_for_event(&self, deadline: zx::MonotonicInstant) -> Result<NodeEvent, fidl::Error> {
12250        NodeEvent::decode(self.client.wait_for_event(deadline)?)
12251    }
12252
12253    /// Ensure that previous messages have been received server side. This is
12254    /// particularly useful after previous messages that created new tokens,
12255    /// because a token must be known to the sysmem server before sending the
12256    /// token to another participant.
12257    ///
12258    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12259    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12260    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12261    /// to mitigate the possibility of a hostile/fake
12262    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12263    /// Another way is to pass the token to
12264    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
12265    /// the token as part of exchanging it for a
12266    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12267    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12268    /// of stalling.
12269    ///
12270    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12271    /// and then starting and completing a `Sync`, it's then safe to send the
12272    /// `BufferCollectionToken` client ends to other participants knowing the
12273    /// server will recognize the tokens when they're sent by the other
12274    /// participants to sysmem in a
12275    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
12276    /// efficient way to create tokens while avoiding unnecessary round trips.
12277    ///
12278    /// Other options include waiting for each
12279    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
12280    /// individually (using a separate call to `Sync` after each), or calling
12281    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
12282    /// converted to a `BufferCollection` via
12283    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
12284    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
12285    /// the sync step and can create multiple tokens at once.
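    ///
    /// A hedged sketch of a blocking call on this synchronous proxy (the
    /// deadline value is illustrative):
    ///
    /// ```rust,ignore
    /// node.sync(zx::MonotonicInstant::INFINITE)?;
    /// // Any tokens created before the Sync are now known to sysmem.
    /// ```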
12286    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
12287        let _response = self.client.send_query::<
12288            fidl::encoding::EmptyPayload,
12289            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
12290        >(
12291            (),
12292            0x11ac2555cf575b54,
12293            fidl::encoding::DynamicFlags::FLEXIBLE,
12294            ___deadline,
12295        )?
12296        .into_result::<NodeMarker>("sync")?;
12297        Ok(_response)
12298    }
12299
12300    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
12301    ///
12302    /// Normally a participant will convert a `BufferCollectionToken` into a
12303    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
12304    /// `Release` via the token (and then close the channel immediately or
12305    /// shortly later in response to server closing the server end), which
12306    /// avoids causing buffer collection failure. Without a prior `Release`,
12307    /// closing the `BufferCollectionToken` client end will cause buffer
12308    /// collection failure.
12309    ///
12310    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
12311    ///
12312    /// By default the server handles unexpected closure of a
12313    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
12314    /// first) by failing the buffer collection. Partly this is to expedite
12315    /// closing VMO handles to reclaim memory when any participant fails. If a
12316    /// participant would like to cleanly close a `BufferCollection` without
12317    /// causing buffer collection failure, the participant can send `Release`
12318    /// before closing the `BufferCollection` client end. The `Release` can
12319    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
12320    /// buffer collection won't require constraints from this node in order to
12321    /// allocate. If after `SetConstraints`, the constraints are retained and
12322    /// aggregated, despite the lack of `BufferCollection` connection at the
12323    /// time of constraints aggregation.
12324    ///
12325    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
12326    ///
12327    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
12328    /// end (without `Release` first) will trigger failure of the buffer
12329    /// collection. To close a `BufferCollectionTokenGroup` channel without
12330    /// failing the buffer collection, ensure that AllChildrenPresent() has been
12331    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
12332    /// client end.
12333    ///
12334    /// If `Release` occurs before
12335    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
12336    /// buffer collection will fail (triggered by reception of `Release` without
12337    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
12338    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
12339    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
12340    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
12341    /// close requires `AllChildrenPresent` (if not already sent), then
12342    /// `Release`, then close client end.
12343    ///
12344    /// If `Release` occurs after `AllChildrenPresent`, the children and all
12345    /// their constraints remain intact (just as they would if the
12346    /// `BufferCollectionTokenGroup` channel had remained open), and the client
12347    /// end close doesn't trigger buffer collection failure.
12348    ///
12349    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
12350    ///
12351    /// For brevity, the per-channel-protocol paragraphs above ignore the
12352    /// separate failure domain created by
12353    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
12354    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
12355    /// unexpectedly closes (without `Release` first) and that client end is
12356    /// under a failure domain, instead of failing the whole buffer collection,
12357    /// the failure domain is failed, but the buffer collection itself is
12358    /// isolated from failure of the failure domain. Such failure domains can be
12359    /// nested, in which case only the inner-most failure domain in which the
12360    /// `Node` resides fails.
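    ///
    /// Sketch of a clean token close (illustrative only):
    ///
    /// ```rust,ignore
    /// // Tell sysmem this participant is done, then close the client end.
    /// token.release()?;
    /// drop(token);
    /// ```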
12361    pub fn r#release(&self) -> Result<(), fidl::Error> {
12362        self.client.send::<fidl::encoding::EmptyPayload>(
12363            (),
12364            0x6a5cae7d6d6e04c6,
12365            fidl::encoding::DynamicFlags::FLEXIBLE,
12366        )
12367    }
12368
12369    /// Set a name for VMOs in this buffer collection.
12370    ///
12371    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the VMO itself
12372    /// will be truncated to fit. The name of the VMO will be suffixed with the
12373    /// buffer index within the collection (if the suffix fits within
12374    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
12375    /// listed in the inspect data.
12376    ///
12377    /// The name only affects VMOs allocated after the name is set; this call
12378    /// does not rename existing VMOs. If multiple clients set different names
12379    /// then the larger priority value will win. Setting a new name with the
12380    /// same priority as a prior name doesn't change the name.
12381    ///
12382    /// All table fields are currently required.
12383    ///
12384    /// + request `priority` The name is only set if this is the first `SetName`
12385    ///   or if `priority` is greater than any previous `priority` value in
12386    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
12387    /// + request `name` The name for VMOs created under this buffer collection.
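    ///
    /// Illustrative call (field values are arbitrary):
    ///
    /// ```rust,ignore
    /// node.set_name(&NodeSetNameRequest {
    ///     priority: Some(100),
    ///     name: Some("my-component:camera-buffers".to_string()),
    ///     ..Default::default()
    /// })?;
    /// ```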
12388    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
12389        self.client.send::<NodeSetNameRequest>(
12390            payload,
12391            0xb41f1624f48c1e9,
12392            fidl::encoding::DynamicFlags::FLEXIBLE,
12393        )
12394    }
12395
12396    /// Set information about the current client that can be used by sysmem to
12397    /// help diagnose leaking memory and allocation stalls waiting for a
12398    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
12399    ///
12400    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
12401    /// `Node`(s) derived from this `Node`, unless overridden by
12402    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
12403    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
12404    ///
12405    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
12406    /// `Allocator` is the most efficient way to ensure that all
12407    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
12408    /// set, and is also more efficient than separately sending the same debug
12409    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
12410    /// created [`fuchsia.sysmem2/Node`].
12411    ///
12412    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
12413    /// indicate which client is closing their channel first, leading to subtree
12414    /// failure (which can be normal if the purpose of the subtree is over, but
12415    /// if happening earlier than expected, the client-channel-specific name can
12416    /// help diagnose where the failure is first coming from, from sysmem's
12417    /// point of view).
12418    ///
12419    /// All table fields are currently required.
12420    ///
12421    /// + request `name` This can be an arbitrary string, but the current
12422    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
12423    /// + request `id` This can be an arbitrary id, but the current process ID
12424    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
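    ///
    /// Illustrative call (sketch only; `process_koid` is a placeholder for a
    /// value the client would obtain itself, typically the current process
    /// koid):
    ///
    /// ```rust,ignore
    /// node.set_debug_client_info(&NodeSetDebugClientInfoRequest {
    ///     name: Some("my_component".to_string()),
    ///     id: Some(process_koid),
    ///     ..Default::default()
    /// })?;
    /// ```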
12425    pub fn r#set_debug_client_info(
12426        &self,
12427        mut payload: &NodeSetDebugClientInfoRequest,
12428    ) -> Result<(), fidl::Error> {
12429        self.client.send::<NodeSetDebugClientInfoRequest>(
12430            payload,
12431            0x5cde8914608d99b1,
12432            fidl::encoding::DynamicFlags::FLEXIBLE,
12433        )
12434    }
12435
12436    /// Sysmem logs a warning if sysmem hasn't seen
12437    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
12438    /// within 5 seconds after creation of a new collection.
12439    ///
12440    /// Clients can call this method to change when the log is printed. If
12441    /// multiple clients set the deadline, it's unspecified which deadline will
12442    /// take effect.
12443    ///
12444    /// In most cases the default works well.
12445    ///
12446    /// All table fields are currently required.
12447    ///
12448    /// + request `deadline` The time at which sysmem will start trying to log
12449    ///   the warning, unless all constraints are with sysmem by then.
12450    pub fn r#set_debug_timeout_log_deadline(
12451        &self,
12452        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
12453    ) -> Result<(), fidl::Error> {
12454        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
12455            payload,
12456            0x716b0af13d5c0806,
12457            fidl::encoding::DynamicFlags::FLEXIBLE,
12458        )
12459    }
12460
12461    /// This enables verbose logging for the buffer collection.
12462    ///
12463    /// Verbose logging includes constraints set via
12464    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
12465    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
12466    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
12467    /// the tree of `Node`(s).
12468    ///
12469    /// Normally sysmem prints only a single line complaint when aggregation
12470    /// fails, with just the specific detailed reason that aggregation failed,
12471    /// with little surrounding context.  While this is often enough to diagnose
12472    /// a problem if only a small change was made and everything was working
12473    /// before the small change, it's often not particularly helpful for getting
12474    /// a new buffer collection to work for the first time.  Especially with
12475    /// more complex trees of nodes, involving things like
12476    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
12477    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
12478    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
12479    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
12480    /// looks like and why it's failing a logical allocation, or why a tree or
12481    /// subtree is failing sooner than expected.
12482    ///
12483    /// The intent of the extra logging is to be acceptable from a performance
12484    /// point of view, under the assumption that verbose logging is only enabled
12485    /// on a low number of buffer collections. If we're not tracking down a bug,
12486    /// we shouldn't send this message.
12487    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
12488        self.client.send::<fidl::encoding::EmptyPayload>(
12489            (),
12490            0x5209c77415b4dfad,
12491            fidl::encoding::DynamicFlags::FLEXIBLE,
12492        )
12493    }
12494
12495    /// This gets a handle that can be used as a parameter to
12496    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
12497    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
12498    /// client obtained this handle from this `Node`.
12499    ///
12500    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
12501    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
12502    /// despite the two calls typically being on different channels.
12503    ///
12504    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
12505    ///
12506    /// All table fields are currently required.
12507    ///
12508    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
12509    ///   different `Node` channel, to prove that the client obtained the handle
12510    ///   from this `Node`.
12511    pub fn r#get_node_ref(
12512        &self,
12513        ___deadline: zx::MonotonicInstant,
12514    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
12515        let _response = self.client.send_query::<
12516            fidl::encoding::EmptyPayload,
12517            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
12518        >(
12519            (),
12520            0x5b3d0e51614df053,
12521            fidl::encoding::DynamicFlags::FLEXIBLE,
12522            ___deadline,
12523        )?
12524        .into_result::<NodeMarker>("get_node_ref")?;
12525        Ok(_response)
12526    }
12527
12528    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
12529    /// rooted at a different child token of a common parent
12530    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
12531    /// passed-in `node_ref`.
12532    ///
12533    /// This call is for assisting with admission control de-duplication, and
12534    /// with debugging.
12535    ///
12536    /// The `node_ref` must be obtained using
12537    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
12538    ///
12539    /// The `node_ref` can be a duplicated handle; it's not necessary to call
12540    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
12541    ///
12542    /// If a calling token may not actually be a valid token at all due to a
12543    /// potentially hostile/untrusted provider of the token, call
12544    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
12545    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
12546    /// never responds due to a calling token not being a real token (not really
12547    /// talking to sysmem).  Another option is to call
12548    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
12549    /// which also validates the token along with converting it to a
12550    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
12551    ///
12552    /// All table fields are currently required.
12553    ///
12554    /// - response `is_alternate`
12555    ///   - true: The first parent node in common between the calling node and
12556    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
12557    ///     that the calling `Node` and the `node_ref` `Node` will not have both
12558    ///     their constraints apply - rather sysmem will choose one or the other
12559    ///     of the constraints - never both.  This is because only one child of
12560    ///     a `BufferCollectionTokenGroup` is selected during logical
12561    ///     allocation, with only that one child's subtree contributing to
12562    ///     constraints aggregation.
12563    ///   - false: The first parent node in common between the calling `Node`
12564    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
12565    ///     Currently, this means the first parent node in common is a
12566    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
12567    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
12568    ///     `Node` may have both their constraints apply during constraints
12569    ///     aggregation of the logical allocation, if both `Node`(s) are
12570    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
12571    ///     this case, there is no `BufferCollectionTokenGroup` that will
12572    ///     directly prevent the two `Node`(s) from both being selected and
12573    ///     their constraints both aggregated, but even when false, one or both
12574    ///     `Node`(s) may still be eliminated from consideration if one or both
12575    ///     `Node`(s) has a direct or indirect parent
12576    ///     `BufferCollectionTokenGroup` which selects a child subtree other
12577    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
12578    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
12579    ///   associated with the same buffer collection as the calling `Node`.
12580    ///   Another reason for this error is if the `node_ref` is an
12581    ///   [`zx.Handle:EVENT`] handle with sufficient rights, but isn't actually
12582    ///   a real `node_ref` obtained from `GetNodeRef`.
12583    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
12584    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
12585    ///   the needed rights expected on a real `node_ref`.
12586    /// * No other failing status codes are returned by this call.  However,
12587    ///   sysmem may add additional codes in future, so the client should have
12588    ///   sensible default handling for any failing status code.
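    ///
    /// A hedged sketch combining `GetNodeRef` and `IsAlternateFor` across two
    /// synchronous proxies, `node_a` and `node_b` (names and the assumed
    /// `node_ref` / `is_alternate` table fields are illustrative):
    ///
    /// ```rust,ignore
    /// let node_ref = node_a
    ///     .get_node_ref(zx::MonotonicInstant::INFINITE)?
    ///     .node_ref
    ///     .expect("node_ref is always set");
    /// let result = node_b.is_alternate_for(
    ///     NodeIsAlternateForRequest { node_ref: Some(node_ref), ..Default::default() },
    ///     zx::MonotonicInstant::INFINITE,
    /// )?;
    /// // Ok(response) when the ref was valid; Err carries the sysmem2 Error.
    /// let is_alternate = result.ok().and_then(|r| r.is_alternate);
    /// ```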
12589    pub fn r#is_alternate_for(
12590        &self,
12591        mut payload: NodeIsAlternateForRequest,
12592        ___deadline: zx::MonotonicInstant,
12593    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
12594        let _response = self.client.send_query::<
12595            NodeIsAlternateForRequest,
12596            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
12597        >(
12598            &mut payload,
12599            0x3a58e00157e0825,
12600            fidl::encoding::DynamicFlags::FLEXIBLE,
12601            ___deadline,
12602        )?
12603        .into_result::<NodeMarker>("is_alternate_for")?;
12604        Ok(_response.map(|x| x))
12605    }
12606
12607    /// Get the buffer collection ID. This ID is also available from
12608    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
12609    /// within the collection).
12610    ///
12611    /// This call is mainly useful in situations where we can't convey a
12612    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
12613    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
12614    /// handle, which can be joined back up with a `BufferCollection` client end
12615    /// that was created via a different path. Prefer to convey a
12616    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
12617    ///
12618    /// Trusting a `buffer_collection_id` value from a source other than sysmem
12619    /// is analogous to trusting a koid value from a source other than zircon.
12620    /// Both should be avoided unless really necessary, and both require
12621    /// caution. In some situations it may be reasonable to refer to a
12622    /// pre-established `BufferCollection` by `buffer_collection_id` via a
12623    /// protocol for efficiency reasons, but an incoming value purporting to be
12624    /// a `buffer_collection_id` is not sufficient alone to justify granting the
12625    /// sender of the `buffer_collection_id` any capability. The sender must
12626    /// first prove to a receiver that the sender has/had a VMO or has/had a
12627    /// `BufferCollectionToken` to the same collection by sending a handle that
12628    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
12629    /// `buffer_collection_id` value. The receiver should take care to avoid
12630    /// assuming that a sender had a `BufferCollectionToken` in cases where the
12631    /// sender has only proven that the sender had a VMO.
12632    ///
12633    /// - response `buffer_collection_id` This ID is unique per buffer
12634    ///   collection per boot. Each buffer is uniquely identified by the
12635    ///   `buffer_collection_id` and `buffer_index` together.
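    ///
    /// Illustrative blocking call (sketch only):
    ///
    /// ```rust,ignore
    /// let id = node
    ///     .get_buffer_collection_id(zx::MonotonicInstant::INFINITE)?
    ///     .buffer_collection_id;
    /// ```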
12636    pub fn r#get_buffer_collection_id(
12637        &self,
12638        ___deadline: zx::MonotonicInstant,
12639    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
12640        let _response = self.client.send_query::<
12641            fidl::encoding::EmptyPayload,
12642            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
12643        >(
12644            (),
12645            0x77d19a494b78ba8c,
12646            fidl::encoding::DynamicFlags::FLEXIBLE,
12647            ___deadline,
12648        )?
12649        .into_result::<NodeMarker>("get_buffer_collection_id")?;
12650        Ok(_response)
12651    }
12652
12653    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
12654    /// created after this message to weak, which means that a client's `Node`
12655    /// client end (or a child created after this message) is not alone
12656    /// sufficient to keep allocated VMOs alive.
12657    ///
12658    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
12659    /// `close_weak_asap`.
12660    ///
12661    /// This message is only permitted before the `Node` becomes ready for
12662    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
12663    ///   * `BufferCollectionToken`: any time
12664    ///   * `BufferCollection`: before `SetConstraints`
12665    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
12666    ///
12667    /// Currently, no conversion from strong `Node` to weak `Node` after ready
12668    /// for allocation is provided, but a client can simulate that by creating
12669    /// an additional `Node` before allocation and setting that additional
12670    /// `Node` to weak, and then potentially at some point later sending
12671    /// `Release` and closing the client end of the client's strong `Node`, but
12672    /// keeping the client's weak `Node`.
12673    ///
12674    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
12675    /// collection failure (all `Node` client end(s) will see
12676    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
12677    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
12678    /// this situation until all `Node`(s) are ready for allocation. For initial
12679    /// allocation to succeed, at least one strong `Node` is required to exist
12680    /// at allocation time, but after that client receives VMO handles, that
12681    /// client can `BufferCollection.Release` and close the client end without
12682    /// causing this type of failure.
12683    ///
12684    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
12685    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
12686    /// separately as appropriate.
12687    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
12688        self.client.send::<fidl::encoding::EmptyPayload>(
12689            (),
12690            0x22dd3ea514eeffe1,
12691            fidl::encoding::DynamicFlags::FLEXIBLE,
12692        )
12693    }
12694
12695    /// This indicates to sysmem that the client is prepared to pay attention to
12696    /// `close_weak_asap`.
12697    ///
12698    /// If sent, this message must be before
12699    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
12700    ///
12701    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
12702    /// send this message before `WaitForAllBuffersAllocated`, or a parent
12703    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
12704    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
12705    /// trigger buffer collection failure.
12706    ///
12707    /// This message is necessary because weak sysmem VMOs have not always been
12708    /// a thing, so older clients are not aware of the need to pay attention to
12709    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
12710    /// sysmem weak VMO handles asap. By having this message and requiring
12711    /// participants to indicate their acceptance of this aspect of the overall
12712    /// protocol, we avoid situations where an older client is delivered a weak
12713    /// VMO without any way for sysmem to get that VMO to close quickly later
12714    /// (and on a per-buffer basis).
12715    ///
12716    /// A participant that doesn't handle `close_weak_asap` and also doesn't
12717    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
12718    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
12719    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
12720    /// same participant has a child/delegate which does retrieve VMOs, that
12721    /// child/delegate will need to send `SetWeakOk` before
12722    /// `WaitForAllBuffersAllocated`.
12723    ///
12724    /// + request `for_child_nodes_also` If present and true, this means direct
12725    ///   child nodes of this node created after this message plus all
12726    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
12727    ///   those nodes. Any child node of this node that was created before this
12728    ///   message is not included. This setting is "sticky" in the sense that a
12729    ///   subsequent `SetWeakOk` without this bool set to true does not reset
12730    ///   the server-side bool. If this creates a problem for a participant, a
12731    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
12732    ///   tokens instead, as appropriate. A participant should only set
12733    ///   `for_child_nodes_also` true if the participant can really promise to
12734    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
12735    ///   weak VMO handles held by participants holding the corresponding child
12736    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
12737    ///   which are using sysmem(1) can be weak, despite the clients of those
12738    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
12739    ///   direct way to find out about `close_weak_asap`. This only applies to
12740    ///   descendants of this `Node` which are using sysmem(1), not to this
12741    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
12742    ///   token, which will fail allocation unless an ancestor of this `Node`
12743    ///   specified `for_child_nodes_also` true.
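    ///
    /// A minimal usage sketch (not part of the generated bindings): opt in to
    /// weak-VMO handling before `WaitForAllBuffersAllocated`. It assumes
    /// `node` is an already-connected [`NodeProxy`] and that the request table
    /// supports `..Default::default()` for unset fields.
    ///
    /// ```no_run
    /// use fidl_fuchsia_sysmem2::{NodeProxy, NodeSetWeakOkRequest};
    ///
    /// fn accept_weak_vmos(node: &NodeProxy) -> Result<(), fidl::Error> {
    ///     // Also covers child nodes created after this message.
    ///     node.set_weak_ok(NodeSetWeakOkRequest {
    ///         for_child_nodes_also: Some(true),
    ///         ..Default::default()
    ///     })
    /// }
    /// ```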
12744    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
12745        self.client.send::<NodeSetWeakOkRequest>(
12746            &mut payload,
12747            0x38a44fc4d7724be9,
12748            fidl::encoding::DynamicFlags::FLEXIBLE,
12749        )
12750    }
12751
12752    /// The server_end will be closed after this `Node` and any child nodes
12753    /// have released their buffer counts, making those counts available for
12754    /// reservation by a different `Node` via
12755    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
12756    ///
12757    /// The `Node` buffer counts may not be released until the entire tree of
12758    /// `Node`(s) is closed or failed, because
12759    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
12760    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
12761    /// `Node` buffer counts remain reserved until the orphaned node is later
12762    /// cleaned up.
12763    ///
12764    /// If the `Node` exceeds a fairly large number of attached eventpair server
12765    /// ends, a log message will indicate this and the `Node` (and the
12766    /// appropriate sub-tree) will fail.
12767    ///
12768    /// The `server_end` will remain open when
12769    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
12770    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
12771    /// [`fuchsia.sysmem2/BufferCollection`].
12772    ///
12773    /// This message can also be used with a
12774    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
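    ///
    /// A minimal usage sketch (not part of the generated bindings): register
    /// an eventpair whose server end closes once this node's buffer counts
    /// have been released. It assumes the caller already created the eventpair
    /// and keeps the client end to wait on.
    ///
    /// ```no_run
    /// use fidl_fuchsia_sysmem2::{NodeAttachNodeTrackingRequest, NodeProxy};
    ///
    /// fn track_node_lifetime(
    ///     node: &NodeProxy,
    ///     server_end: fidl::EventPair,
    /// ) -> Result<(), fidl::Error> {
    ///     node.attach_node_tracking(NodeAttachNodeTrackingRequest {
    ///         server_end: Some(server_end),
    ///         ..Default::default()
    ///     })
    /// }
    /// ```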
12775    pub fn r#attach_node_tracking(
12776        &self,
12777        mut payload: NodeAttachNodeTrackingRequest,
12778    ) -> Result<(), fidl::Error> {
12779        self.client.send::<NodeAttachNodeTrackingRequest>(
12780            &mut payload,
12781            0x3f22f2a293d3cdac,
12782            fidl::encoding::DynamicFlags::FLEXIBLE,
12783        )
12784    }
12785}
12786
12787#[derive(Debug, Clone)]
12788pub struct NodeProxy {
12789    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
12790}
12791
12792impl fidl::endpoints::Proxy for NodeProxy {
12793    type Protocol = NodeMarker;
12794
12795    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
12796        Self::new(inner)
12797    }
12798
12799    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
12800        self.client.into_channel().map_err(|client| Self { client })
12801    }
12802
12803    fn as_channel(&self) -> &::fidl::AsyncChannel {
12804        self.client.as_channel()
12805    }
12806}
12807
12808impl NodeProxy {
12809    /// Create a new Proxy for fuchsia.sysmem2/Node.
12810    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
12811        let protocol_name = <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
12812        Self { client: fidl::client::Client::new(channel, protocol_name) }
12813    }
12814
12815    /// Get a Stream of events from the remote end of the protocol.
12816    ///
12817    /// # Panics
12818    ///
12819    /// Panics if the event stream was already taken.
12820    pub fn take_event_stream(&self) -> NodeEventStream {
12821        NodeEventStream { event_receiver: self.client.take_event_receiver() }
12822    }
12823
12824    /// Ensure that previous messages have been received server side. This is
12825    /// particularly useful after previous messages that created new tokens,
12826    /// because a token must be known to the sysmem server before sending the
12827    /// token to another participant.
12828    ///
12829    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12830    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12831    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12832    /// to mitigate the possibility of a hostile/fake
12833    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12834    /// Another way is to pass the token to
12835    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
12836    /// the token as part of exchanging it for a
12837    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12838    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12839    /// of stalling.
12840    ///
12841    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12842    /// and then starting and completing a `Sync`, it's then safe to send the
12843    /// `BufferCollectionToken` client ends to other participants knowing the
12844    /// server will recognize the tokens when they're sent by the other
12845    /// participants to sysmem in a
12846    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
12847    /// efficient way to create tokens while avoiding unnecessary round trips.
12848    ///
12849    /// Other options include waiting for each
12850    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
12851    /// individually (using a separate call to `Sync` after each), or calling
12852    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
12853    /// converted to a `BufferCollection` via
12854    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
12855    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
12856    /// the sync step and can create multiple tokens at once.
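    ///
    /// A minimal usage sketch (not part of the generated bindings): await a
    /// `Sync` after token-creating messages so freshly created token client
    /// ends can safely be passed to other participants. It assumes an async
    /// executor is already running.
    ///
    /// ```no_run
    /// use fidl_fuchsia_sysmem2::NodeProxy;
    ///
    /// async fn ensure_tokens_known_to_server(node: &NodeProxy) -> Result<(), fidl::Error> {
    ///     // Completion of Sync guarantees the sysmem server has processed
    ///     // all messages sent on this channel before the Sync.
    ///     node.sync().await
    /// }
    /// ```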
12857    pub fn r#sync(
12858        &self,
12859    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
12860        NodeProxyInterface::r#sync(self)
12861    }
12862
12863    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
12864    ///
12865    /// Normally a participant will convert a `BufferCollectionToken` into a
12866    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
12867    /// `Release` via the token (and then close the channel immediately or
12868    /// shortly later in response to server closing the server end), which
12869    /// avoids causing buffer collection failure. Without a prior `Release`,
12870    /// closing the `BufferCollectionToken` client end will cause buffer
12871    /// collection failure.
12872    ///
12873    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
12874    ///
12875    /// By default the server handles unexpected closure of a
12876    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
12877    /// first) by failing the buffer collection. Partly this is to expedite
12878    /// closing VMO handles to reclaim memory when any participant fails. If a
12879    /// participant would like to cleanly close a `BufferCollection` without
12880    /// causing buffer collection failure, the participant can send `Release`
12881    /// before closing the `BufferCollection` client end. The `Release` can
12882    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
12883    /// buffer collection won't require constraints from this node in order to
12884    /// allocate. If after `SetConstraints`, the constraints are retained and
12885    /// aggregated, despite the lack of `BufferCollection` connection at the
12886    /// time of constraints aggregation.
12887    ///
12888    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
12889    ///
12890    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
12891    /// end (without `Release` first) will trigger failure of the buffer
12892    /// collection. To close a `BufferCollectionTokenGroup` channel without
12893    /// failing the buffer collection, ensure that AllChildrenPresent() has been
12894    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
12895    /// client end.
12896    ///
12897    /// If `Release` occurs before
12898    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
12899    /// buffer collection will fail (triggered by reception of `Release` without
12900    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
12901    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
12902    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
12903    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
12904    /// close requires `AllChildrenPresent` (if not already sent), then
12905    /// `Release`, then close client end.
12906    ///
12907    /// If `Release` occurs after `AllChildrenPresent`, the children and all
12908    /// their constraints remain intact (just as they would if the
12909    /// `BufferCollectionTokenGroup` channel had remained open), and the client
12910    /// end close doesn't trigger buffer collection failure.
12911    ///
12912    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
12913    ///
12914    /// For brevity, the per-channel-protocol paragraphs above ignore the
12915    /// separate failure domain created by
12916    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
12917    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
12918    /// unexpectedly closes (without `Release` first) and that client end is
12919    /// under a failure domain, instead of failing the whole buffer collection,
12920    /// the failure domain is failed, but the buffer collection itself is
12921    /// isolated from failure of the failure domain. Such failure domains can be
12922    /// nested, in which case only the inner-most failure domain in which the
12923    /// `Node` resides fails.
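    ///
    /// A minimal usage sketch (not part of the generated bindings): close a
    /// `Node` cleanly without failing the buffer collection by sending
    /// `Release` before dropping the proxy (which closes the client end).
    ///
    /// ```no_run
    /// use fidl_fuchsia_sysmem2::NodeProxy;
    ///
    /// fn close_node_cleanly(node: NodeProxy) -> Result<(), fidl::Error> {
    ///     node.release()?;
    ///     // Dropping the proxy closes the client end; because Release was
    ///     // sent first, this does not fail the buffer collection.
    ///     drop(node);
    ///     Ok(())
    /// }
    /// ```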
12924    pub fn r#release(&self) -> Result<(), fidl::Error> {
12925        NodeProxyInterface::r#release(self)
12926    }
12927
12928    /// Set a name for VMOs in this buffer collection.
12929    ///
12930    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the VMO itself
12931    /// will be truncated to fit. The name of the VMO will be suffixed with the
12932    /// buffer index within the collection (if the suffix fits within
12933    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
12934    /// listed in the inspect data.
12935    ///
12936    /// The name only affects VMOs allocated after the name is set; this call
12937    /// does not rename existing VMOs. If multiple clients set different names
12938    /// then the larger priority value will win. Setting a new name with the
12939    /// same priority as a prior name doesn't change the name.
12940    ///
12941    /// All table fields are currently required.
12942    ///
12943    /// + request `priority` The name is only set if this is the first `SetName`
12944    ///   or if `priority` is greater than any previous `priority` value in
12945    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
12946    /// + request `name` The name for VMOs created under this buffer collection.
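    ///
    /// A minimal usage sketch (not part of the generated bindings); the
    /// priority and name values are placeholders chosen for illustration.
    ///
    /// ```no_run
    /// use fidl_fuchsia_sysmem2::{NodeProxy, NodeSetNameRequest};
    ///
    /// fn name_collection_vmos(node: &NodeProxy) -> Result<(), fidl::Error> {
    ///     node.set_name(&NodeSetNameRequest {
    ///         priority: Some(100),
    ///         name: Some("example-scanout-buffers".to_string()),
    ///         ..Default::default()
    ///     })
    /// }
    /// ```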
12947    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
12948        NodeProxyInterface::r#set_name(self, payload)
12949    }
12950
12951    /// Set information about the current client that can be used by sysmem to
12952    /// help diagnose leaking memory and allocation stalls waiting for a
12953    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
12954    ///
12955    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
12956    /// `Node`(s) derived from this `Node`, unless overridden by
12957    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
12958    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
12959    ///
12960    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
12961    /// `Allocator` is the most efficient way to ensure that all
12962    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
12963    /// set, and is also more efficient than separately sending the same debug
12964    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
12965    /// created [`fuchsia.sysmem2/Node`].
12966    ///
12967    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
12968    /// indicate which client is closing their channel first, leading to subtree
12969    /// failure (which can be normal if the purpose of the subtree is over, but
12970    /// if happening earlier than expected, the client-channel-specific name can
12971    /// help diagnose where the failure is first coming from, from sysmem's
12972    /// point of view).
12973    ///
12974    /// All table fields are currently required.
12975    ///
12976    /// + request `name` This can be an arbitrary string, but the current
12977    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
12978    /// + request `id` This can be an arbitrary id, but the current process ID
12979    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
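    ///
    /// A minimal usage sketch (not part of the generated bindings); the name
    /// and id shown are placeholders, whereas real code would typically pass
    /// the current process name and process koid as suggested above.
    ///
    /// ```no_run
    /// use fidl_fuchsia_sysmem2::{NodeProxy, NodeSetDebugClientInfoRequest};
    ///
    /// fn set_debug_identity(node: &NodeProxy) -> Result<(), fidl::Error> {
    ///     node.set_debug_client_info(&NodeSetDebugClientInfoRequest {
    ///         name: Some("example-component".to_string()),
    ///         id: Some(1234),
    ///         ..Default::default()
    ///     })
    /// }
    /// ```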
12980    pub fn r#set_debug_client_info(
12981        &self,
12982        mut payload: &NodeSetDebugClientInfoRequest,
12983    ) -> Result<(), fidl::Error> {
12984        NodeProxyInterface::r#set_debug_client_info(self, payload)
12985    }
12986
12987    /// Sysmem logs a warning if sysmem hasn't seen
12988    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
12989    /// within 5 seconds after creation of a new collection.
12990    ///
12991    /// Clients can call this method to change when the log is printed. If
12992    /// multiple clients set the deadline, it's unspecified which deadline will
12993    /// take effect.
12994    ///
12995    /// In most cases the default works well.
12996    ///
12997    /// All table fields are currently required.
12998    ///
12999    /// + request `deadline` The time at which sysmem will start trying to log
13000    ///   the warning, unless all constraints are with sysmem by then.
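    ///
    /// A minimal usage sketch (not part of the generated bindings); it assumes
    /// the deadline is expressed as monotonic nanoseconds (`i64`), supplied by
    /// the caller.
    ///
    /// ```no_run
    /// use fidl_fuchsia_sysmem2::{NodeProxy, NodeSetDebugTimeoutLogDeadlineRequest};
    ///
    /// fn move_constraints_warning(
    ///     node: &NodeProxy,
    ///     deadline_ns: i64,
    /// ) -> Result<(), fidl::Error> {
    ///     node.set_debug_timeout_log_deadline(&NodeSetDebugTimeoutLogDeadlineRequest {
    ///         deadline: Some(deadline_ns),
    ///         ..Default::default()
    ///     })
    /// }
    /// ```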
13001    pub fn r#set_debug_timeout_log_deadline(
13002        &self,
13003        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
13004    ) -> Result<(), fidl::Error> {
13005        NodeProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
13006    }
13007
13008    /// This enables verbose logging for the buffer collection.
13009    ///
13010    /// Verbose logging includes constraints set via
13011    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
13012    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
13013    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
13014    /// the tree of `Node`(s).
13015    ///
13016    /// Normally sysmem prints only a single line complaint when aggregation
13017    /// fails, with just the specific detailed reason that aggregation failed,
13018    /// with little surrounding context.  While this is often enough to diagnose
13019    /// a problem if only a small change was made and everything was working
13020    /// before the small change, it's often not particularly helpful for getting
13021    /// a new buffer collection to work for the first time.  Especially with
13022    /// more complex trees of nodes, involving things like
13023    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
13024    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
13025    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
13026    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
13027    /// looks like and why it's failing a logical allocation, or why a tree or
13028    /// subtree is failing sooner than expected.
13029    ///
13030    /// The intent of the extra logging is to be acceptable from a performance
13031    /// point of view, under the assumption that verbose logging is only enabled
13032    /// on a low number of buffer collections. If we're not tracking down a bug,
13033    /// we shouldn't send this message.
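    ///
    /// A minimal usage sketch (not part of the generated bindings): enable
    /// verbose logging only while actively debugging a failing allocation.
    ///
    /// ```no_run
    /// use fidl_fuchsia_sysmem2::NodeProxy;
    ///
    /// fn debug_allocation_failure(node: &NodeProxy) -> Result<(), fidl::Error> {
    ///     // One-way message; applies to the whole buffer collection.
    ///     node.set_verbose_logging()
    /// }
    /// ```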
13034    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
13035        NodeProxyInterface::r#set_verbose_logging(self)
13036    }
13037
13038    /// This gets a handle that can be used as a parameter to
13039    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
13040    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
13041    /// client obtained this handle from this `Node`.
13042    ///
13043    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
13044    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
13045    /// despite the two calls typically being on different channels.
13046    ///
13047    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
13048    ///
13049    /// All table fields are currently required.
13050    ///
13051    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
13052    ///   different `Node` channel, to prove that the client obtained the handle
13053    ///   from this `Node`.
13054    pub fn r#get_node_ref(
13055        &self,
13056    ) -> fidl::client::QueryResponseFut<
13057        NodeGetNodeRefResponse,
13058        fidl::encoding::DefaultFuchsiaResourceDialect,
13059    > {
13060        NodeProxyInterface::r#get_node_ref(self)
13061    }
13062
13063    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
13064    /// rooted at a different child token of a common parent
13065    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
13066    /// passed-in `node_ref`.
13067    ///
13068    /// This call is for assisting with admission control de-duplication, and
13069    /// with debugging.
13070    ///
13071    /// The `node_ref` must be obtained using
13072    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
13073    ///
13074    /// The `node_ref` can be a duplicated handle; it's not necessary to call
13075    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
13076    ///
13077    /// If a calling token may not actually be a valid token at all due to a
13078    /// potentially hostile/untrusted provider of the token, call
13079    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
13080    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
13081    /// never responds due to a calling token not being a real token (not really
13082    /// talking to sysmem).  Another option is to call
13083    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
13084    /// which also validates the token along with converting it to a
13085    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
13086    ///
13087    /// All table fields are currently required.
13088    ///
13089    /// - response `is_alternate`
13090    ///   - true: The first parent node in common between the calling node and
13091    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
13092    ///     that the calling `Node` and the `node_ref` `Node` will not have both
13093    ///     their constraints apply - rather sysmem will choose one or the other
13094    ///     of the constraints - never both.  This is because only one child of
13095    ///     a `BufferCollectionTokenGroup` is selected during logical
13096    ///     allocation, with only that one child's subtree contributing to
13097    ///     constraints aggregation.
13098    ///   - false: The first parent node in common between the calling `Node`
13099    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
13100    ///     Currently, this means the first parent node in common is a
13101    ///     `BufferCollectionToken` or `BufferCollection` (regardless of whether it
13102    ///     has been `Release`ed). This means that the calling `Node` and the `node_ref`
13103    ///     `Node` may have both their constraints apply during constraints
13104    ///     aggregation of the logical allocation, if both `Node`(s) are
13105    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
13106    ///     this case, there is no `BufferCollectionTokenGroup` that will
13107    ///     directly prevent the two `Node`(s) from both being selected and
13108    ///     their constraints both aggregated, but even when false, one or both
13109    ///     `Node`(s) may still be eliminated from consideration if one or both
13110    ///     `Node`(s) has a direct or indirect parent
13111    ///     `BufferCollectionTokenGroup` which selects a child subtree other
13112    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
13113    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
13114    ///   associated with the same buffer collection as the calling `Node`.
13115    ///   Another reason for this error is if the `node_ref` is an
13116    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
13117    ///   a real `node_ref` obtained from `GetNodeRef`.
13118    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
13119    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
13120    ///   the needed rights expected on a real `node_ref`.
13121    /// * No other failing status codes are returned by this call.  However,
13122    ///   sysmem may add additional codes in future, so the client should have
13123    ///   sensible default handling for any failing status code.
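    ///
    /// A minimal usage sketch (not part of the generated bindings): fetch a
    /// `node_ref` from one node and test it against another. It assumes both
    /// proxies talk to the same (trusted) sysmem instance and that the
    /// response table exposes `is_alternate` as an `Option<bool>`.
    ///
    /// ```no_run
    /// use fidl_fuchsia_sysmem2::{NodeIsAlternateForRequest, NodeProxy};
    ///
    /// async fn nodes_are_alternates(
    ///     node_a: &NodeProxy,
    ///     node_b: &NodeProxy,
    /// ) -> Result<bool, fidl::Error> {
    ///     // No Sync is needed between GetNodeRef and IsAlternateFor.
    ///     let node_ref = node_b.get_node_ref().await?.node_ref;
    ///     match node_a
    ///         .is_alternate_for(NodeIsAlternateForRequest { node_ref, ..Default::default() })
    ///         .await?
    ///     {
    ///         Ok(response) => Ok(response.is_alternate.unwrap_or(false)),
    ///         // The domain error (e.g. NOT_FOUND) is collapsed to `false`
    ///         // purely to keep this sketch small.
    ///         Err(_sysmem_error) => Ok(false),
    ///     }
    /// }
    /// ```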
13124    pub fn r#is_alternate_for(
13125        &self,
13126        mut payload: NodeIsAlternateForRequest,
13127    ) -> fidl::client::QueryResponseFut<
13128        NodeIsAlternateForResult,
13129        fidl::encoding::DefaultFuchsiaResourceDialect,
13130    > {
13131        NodeProxyInterface::r#is_alternate_for(self, payload)
13132    }
13133
13134    /// Get the buffer collection ID. This ID is also available from
13135    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
13136    /// within the collection).
13137    ///
13138    /// This call is mainly useful in situations where we can't convey a
13139    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
13140    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
13141    /// handle, which can be joined back up with a `BufferCollection` client end
13142    /// that was created via a different path. Prefer to convey a
13143    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
13144    ///
13145    /// Trusting a `buffer_collection_id` value from a source other than sysmem
13146    /// is analogous to trusting a koid value from a source other than zircon.
13147    /// Both should be avoided unless really necessary, and both require
13148    /// caution. In some situations it may be reasonable to refer to a
13149    /// pre-established `BufferCollection` by `buffer_collection_id` via a
13150    /// protocol for efficiency reasons, but an incoming value purporting to be
13151    /// a `buffer_collection_id` is not sufficient alone to justify granting the
13152    /// sender of the `buffer_collection_id` any capability. The sender must
13153    /// first prove to a receiver that the sender has/had a VMO or has/had a
13154    /// `BufferCollectionToken` to the same collection by sending a handle that
13155    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
13156    /// `buffer_collection_id` value. The receiver should take care to avoid
13157    /// assuming that a sender had a `BufferCollectionToken` in cases where the
13158    /// sender has only proven that the sender had a VMO.
13159    ///
13160    /// - response `buffer_collection_id` This ID is unique per buffer
13161    ///   collection per boot. Each buffer is uniquely identified by the
13162    ///   `buffer_collection_id` and `buffer_index` together.
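    ///
    /// A minimal usage sketch (not part of the generated bindings): read the
    /// per-boot collection ID, e.g. for correlating with
    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] results.
    ///
    /// ```no_run
    /// use fidl_fuchsia_sysmem2::NodeProxy;
    ///
    /// async fn log_collection_id(node: &NodeProxy) -> Result<(), fidl::Error> {
    ///     let response = node.get_buffer_collection_id().await?;
    ///     if let Some(id) = response.buffer_collection_id {
    ///         println!("buffer collection id: {id}");
    ///     }
    ///     Ok(())
    /// }
    /// ```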
13163    pub fn r#get_buffer_collection_id(
13164        &self,
13165    ) -> fidl::client::QueryResponseFut<
13166        NodeGetBufferCollectionIdResponse,
13167        fidl::encoding::DefaultFuchsiaResourceDialect,
13168    > {
13169        NodeProxyInterface::r#get_buffer_collection_id(self)
13170    }
13171
13172    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
13173    /// created after this message to weak, which means that a client's `Node`
13174    /// client end (or a child created after this message) is not alone
13175    /// sufficient to keep allocated VMOs alive.
13176    ///
13177    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
13178    /// `close_weak_asap`.
13179    ///
13180    /// This message is only permitted before the `Node` becomes ready for
13181    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
13182    ///   * `BufferCollectionToken`: any time
13183    ///   * `BufferCollection`: before `SetConstraints`
13184    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
13185    ///
13186    /// Currently, no conversion from strong `Node` to weak `Node` after ready
13187    /// for allocation is provided, but a client can simulate that by creating
13188    /// an additional `Node` before allocation and setting that additional
13189    /// `Node` to weak, and then potentially at some point later sending
13190    /// `Release` and closing the client end of the client's strong `Node`, but
13191    /// keeping the client's weak `Node`.
13192    ///
13193    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
13194    /// collection failure (all `Node` client end(s) will see
13195    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
13196    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
13197    /// this situation until all `Node`(s) are ready for allocation. For initial
13198    /// allocation to succeed, at least one strong `Node` is required to exist
13199    /// at allocation time, but once a client has received VMO handles, that
13200    /// client can `BufferCollection.Release` and close the client end without
13201    /// causing this type of failure.
13202    ///
13203    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
13204    /// imply `SetWeakOk` with `for_child_nodes_also` true, which can be sent
13205    /// separately as appropriate.
13206    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
13207        NodeProxyInterface::r#set_weak(self)
13208    }
13209
13210    /// This indicates to sysmem that the client is prepared to pay attention to
13211    /// `close_weak_asap`.
13212    ///
13213    /// If sent, this message must be before
13214    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
13215    ///
13216    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
13217    /// send this message before `WaitForAllBuffersAllocated`, or a parent
13218    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
13219    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
13220    /// trigger buffer collection failure.
13221    ///
13222    /// This message is necessary because weak sysmem VMOs have not always been
13223    /// a thing, so older clients are not aware of the need to pay attention to
13224    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
13225    /// sysmem weak VMO handles asap. By having this message and requiring
13226    /// participants to indicate their acceptance of this aspect of the overall
13227    /// protocol, we avoid situations where an older client is delivered a weak
13228    /// VMO without any way for sysmem to get that VMO to close quickly later
13229    /// (and on a per-buffer basis).
13230    ///
13231    /// A participant that doesn't handle `close_weak_asap` and also doesn't
13232    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
13233    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
13234    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
13235    /// same participant has a child/delegate which does retrieve VMOs, that
13236    /// child/delegate will need to send `SetWeakOk` before
13237    /// `WaitForAllBuffersAllocated`.
13238    ///
13239    /// + request `for_child_nodes_also` If present and true, this means direct
13240    ///   child nodes of this node created after this message plus all
13241    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
13242    ///   those nodes. Any child node of this node that was created before this
13243    ///   message is not included. This setting is "sticky" in the sense that a
13244    ///   subsequent `SetWeakOk` without this bool set to true does not reset
13245    ///   the server-side bool. If this creates a problem for a participant, a
13246    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
13247    ///   tokens instead, as appropriate. A participant should only set
13248    ///   `for_child_nodes_also` true if the participant can really promise to
13249    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
13250    ///   weak VMO handles held by participants holding the corresponding child
13251    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
13252    ///   which are using sysmem(1) can be weak, despite the clients of those
13253    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
13254    ///   direct way to find out about `close_weak_asap`. This only applies to
13255    ///   descendants of this `Node` which are using sysmem(1), not to this
13256    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
13257    ///   token, which will fail allocation unless an ancestor of this `Node`
13258    ///   specified `for_child_nodes_also` true.
13259    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
13260        NodeProxyInterface::r#set_weak_ok(self, payload)
13261    }
13262
13263    /// The server_end will be closed after this `Node` and any child nodes
13264    /// have released their buffer counts, making those counts available for
13265    /// reservation by a different `Node` via
13266    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
13267    ///
13268    /// The `Node` buffer counts may not be released until the entire tree of
13269    /// `Node`(s) is closed or failed, because
13270    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
13271    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
13272    /// `Node` buffer counts remain reserved until the orphaned node is later
13273    /// cleaned up.
13274    ///
13275    /// If the `Node` exceeds a fairly large number of attached eventpair server
13276    /// ends, a log message will indicate this and the `Node` (and the
13277    /// appropriate sub-tree) will fail.
13278    ///
13279    /// The `server_end` will remain open when
13280    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
13281    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
13282    /// [`fuchsia.sysmem2/BufferCollection`].
13283    ///
13284    /// This message can also be used with a
13285    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
13286    pub fn r#attach_node_tracking(
13287        &self,
13288        mut payload: NodeAttachNodeTrackingRequest,
13289    ) -> Result<(), fidl::Error> {
13290        NodeProxyInterface::r#attach_node_tracking(self, payload)
13291    }
13292}
13293
13294impl NodeProxyInterface for NodeProxy {
13295    type SyncResponseFut =
13296        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
13297    fn r#sync(&self) -> Self::SyncResponseFut {
13298        fn _decode(
13299            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
13300        ) -> Result<(), fidl::Error> {
13301            let _response = fidl::client::decode_transaction_body::<
13302                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
13303                fidl::encoding::DefaultFuchsiaResourceDialect,
13304                0x11ac2555cf575b54,
13305            >(_buf?)?
13306            .into_result::<NodeMarker>("sync")?;
13307            Ok(_response)
13308        }
13309        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
13310            (),
13311            0x11ac2555cf575b54,
13312            fidl::encoding::DynamicFlags::FLEXIBLE,
13313            _decode,
13314        )
13315    }
13316
13317    fn r#release(&self) -> Result<(), fidl::Error> {
13318        self.client.send::<fidl::encoding::EmptyPayload>(
13319            (),
13320            0x6a5cae7d6d6e04c6,
13321            fidl::encoding::DynamicFlags::FLEXIBLE,
13322        )
13323    }
13324
13325    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
13326        self.client.send::<NodeSetNameRequest>(
13327            payload,
13328            0xb41f1624f48c1e9,
13329            fidl::encoding::DynamicFlags::FLEXIBLE,
13330        )
13331    }
13332
13333    fn r#set_debug_client_info(
13334        &self,
13335        mut payload: &NodeSetDebugClientInfoRequest,
13336    ) -> Result<(), fidl::Error> {
13337        self.client.send::<NodeSetDebugClientInfoRequest>(
13338            payload,
13339            0x5cde8914608d99b1,
13340            fidl::encoding::DynamicFlags::FLEXIBLE,
13341        )
13342    }
13343
13344    fn r#set_debug_timeout_log_deadline(
13345        &self,
13346        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
13347    ) -> Result<(), fidl::Error> {
13348        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
13349            payload,
13350            0x716b0af13d5c0806,
13351            fidl::encoding::DynamicFlags::FLEXIBLE,
13352        )
13353    }
13354
13355    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
13356        self.client.send::<fidl::encoding::EmptyPayload>(
13357            (),
13358            0x5209c77415b4dfad,
13359            fidl::encoding::DynamicFlags::FLEXIBLE,
13360        )
13361    }
13362
13363    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
13364        NodeGetNodeRefResponse,
13365        fidl::encoding::DefaultFuchsiaResourceDialect,
13366    >;
13367    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
13368        fn _decode(
13369            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
13370        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
13371            let _response = fidl::client::decode_transaction_body::<
13372                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
13373                fidl::encoding::DefaultFuchsiaResourceDialect,
13374                0x5b3d0e51614df053,
13375            >(_buf?)?
13376            .into_result::<NodeMarker>("get_node_ref")?;
13377            Ok(_response)
13378        }
13379        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
13380            (),
13381            0x5b3d0e51614df053,
13382            fidl::encoding::DynamicFlags::FLEXIBLE,
13383            _decode,
13384        )
13385    }
13386
13387    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
13388        NodeIsAlternateForResult,
13389        fidl::encoding::DefaultFuchsiaResourceDialect,
13390    >;
13391    fn r#is_alternate_for(
13392        &self,
13393        mut payload: NodeIsAlternateForRequest,
13394    ) -> Self::IsAlternateForResponseFut {
13395        fn _decode(
13396            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
13397        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
13398            let _response = fidl::client::decode_transaction_body::<
13399                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
13400                fidl::encoding::DefaultFuchsiaResourceDialect,
13401                0x3a58e00157e0825,
13402            >(_buf?)?
13403            .into_result::<NodeMarker>("is_alternate_for")?;
13404            Ok(_response.map(|x| x))
13405        }
13406        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
13407            &mut payload,
13408            0x3a58e00157e0825,
13409            fidl::encoding::DynamicFlags::FLEXIBLE,
13410            _decode,
13411        )
13412    }
13413
13414    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
13415        NodeGetBufferCollectionIdResponse,
13416        fidl::encoding::DefaultFuchsiaResourceDialect,
13417    >;
13418    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
13419        fn _decode(
13420            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
13421        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
13422            let _response = fidl::client::decode_transaction_body::<
13423                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
13424                fidl::encoding::DefaultFuchsiaResourceDialect,
13425                0x77d19a494b78ba8c,
13426            >(_buf?)?
13427            .into_result::<NodeMarker>("get_buffer_collection_id")?;
13428            Ok(_response)
13429        }
13430        self.client.send_query_and_decode::<
13431            fidl::encoding::EmptyPayload,
13432            NodeGetBufferCollectionIdResponse,
13433        >(
13434            (),
13435            0x77d19a494b78ba8c,
13436            fidl::encoding::DynamicFlags::FLEXIBLE,
13437            _decode,
13438        )
13439    }
13440
13441    fn r#set_weak(&self) -> Result<(), fidl::Error> {
13442        self.client.send::<fidl::encoding::EmptyPayload>(
13443            (),
13444            0x22dd3ea514eeffe1,
13445            fidl::encoding::DynamicFlags::FLEXIBLE,
13446        )
13447    }
13448
13449    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
13450        self.client.send::<NodeSetWeakOkRequest>(
13451            &mut payload,
13452            0x38a44fc4d7724be9,
13453            fidl::encoding::DynamicFlags::FLEXIBLE,
13454        )
13455    }
13456
13457    fn r#attach_node_tracking(
13458        &self,
13459        mut payload: NodeAttachNodeTrackingRequest,
13460    ) -> Result<(), fidl::Error> {
13461        self.client.send::<NodeAttachNodeTrackingRequest>(
13462            &mut payload,
13463            0x3f22f2a293d3cdac,
13464            fidl::encoding::DynamicFlags::FLEXIBLE,
13465        )
13466    }
13467}
13468
13469pub struct NodeEventStream {
13470    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
13471}
13472
13473impl std::marker::Unpin for NodeEventStream {}
13474
13475impl futures::stream::FusedStream for NodeEventStream {
13476    fn is_terminated(&self) -> bool {
13477        self.event_receiver.is_terminated()
13478    }
13479}
13480
13481impl futures::Stream for NodeEventStream {
13482    type Item = Result<NodeEvent, fidl::Error>;
13483
13484    fn poll_next(
13485        mut self: std::pin::Pin<&mut Self>,
13486        cx: &mut std::task::Context<'_>,
13487    ) -> std::task::Poll<Option<Self::Item>> {
13488        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
13489            &mut self.event_receiver,
13490            cx
13491        )?) {
13492            Some(buf) => std::task::Poll::Ready(Some(NodeEvent::decode(buf))),
13493            None => std::task::Poll::Ready(None),
13494        }
13495    }
13496}
13497
13498#[derive(Debug)]
13499pub enum NodeEvent {
13500    #[non_exhaustive]
13501    _UnknownEvent {
13502        /// Ordinal of the event that was sent.
13503        ordinal: u64,
13504    },
13505}
13506
13507impl NodeEvent {
13508    /// Decodes a message buffer as a [`NodeEvent`].
13509    fn decode(
13510        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
13511    ) -> Result<NodeEvent, fidl::Error> {
13512        let (bytes, _handles) = buf.split_mut();
13513        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
13514        debug_assert_eq!(tx_header.tx_id, 0);
13515        match tx_header.ordinal {
13516            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
13517                Ok(NodeEvent::_UnknownEvent { ordinal: tx_header.ordinal })
13518            }
13519            _ => Err(fidl::Error::UnknownOrdinal {
13520                ordinal: tx_header.ordinal,
13521                protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
13522            }),
13523        }
13524    }
13525}
13526
13527/// A Stream of incoming requests for fuchsia.sysmem2/Node.
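///
/// A hedged server-side sketch (not part of the generated bindings): drain the
/// stream and handle a few of the one-way requests. A real server must also
/// reply through the responders carried by two-way requests such as `Sync`;
/// the wildcard arm below ignores them only to keep the sketch short.
///
/// ```no_run
/// use fidl_fuchsia_sysmem2::{NodeRequest, NodeRequestStream};
/// use futures::TryStreamExt;
///
/// async fn serve_node(mut stream: NodeRequestStream) -> Result<(), fidl::Error> {
///     while let Some(request) = stream.try_next().await? {
///         match request {
///             NodeRequest::Release { control_handle: _ } => {
///                 // Client requested a clean close of this Node.
///             }
///             NodeRequest::SetName { payload, control_handle: _ } => {
///                 // `payload` is a NodeSetNameRequest table.
///                 let _ = payload;
///             }
///             NodeRequest::_UnknownMethod { ordinal, .. } => {
///                 // Flexible method this server does not recognize.
///                 eprintln!("unknown Node method ordinal: {ordinal}");
///             }
///             _ => {
///                 // Two-way requests (Sync, GetNodeRef, ...) carry responders
///                 // that should be used to send a reply.
///             }
///         }
///     }
///     Ok(())
/// }
/// ```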
13528pub struct NodeRequestStream {
13529    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
13530    is_terminated: bool,
13531}
13532
13533impl std::marker::Unpin for NodeRequestStream {}
13534
13535impl futures::stream::FusedStream for NodeRequestStream {
13536    fn is_terminated(&self) -> bool {
13537        self.is_terminated
13538    }
13539}
13540
13541impl fidl::endpoints::RequestStream for NodeRequestStream {
13542    type Protocol = NodeMarker;
13543    type ControlHandle = NodeControlHandle;
13544
13545    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
13546        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
13547    }
13548
13549    fn control_handle(&self) -> Self::ControlHandle {
13550        NodeControlHandle { inner: self.inner.clone() }
13551    }
13552
13553    fn into_inner(
13554        self,
13555    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
13556    {
13557        (self.inner, self.is_terminated)
13558    }
13559
13560    fn from_inner(
13561        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
13562        is_terminated: bool,
13563    ) -> Self {
13564        Self { inner, is_terminated }
13565    }
13566}
13567
13568impl futures::Stream for NodeRequestStream {
13569    type Item = Result<NodeRequest, fidl::Error>;
13570
13571    fn poll_next(
13572        mut self: std::pin::Pin<&mut Self>,
13573        cx: &mut std::task::Context<'_>,
13574    ) -> std::task::Poll<Option<Self::Item>> {
13575        let this = &mut *self;
13576        if this.inner.check_shutdown(cx) {
13577            this.is_terminated = true;
13578            return std::task::Poll::Ready(None);
13579        }
13580        if this.is_terminated {
13581            panic!("polled NodeRequestStream after completion");
13582        }
13583        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
13584            |bytes, handles| {
13585                match this.inner.channel().read_etc(cx, bytes, handles) {
13586                    std::task::Poll::Ready(Ok(())) => {}
13587                    std::task::Poll::Pending => return std::task::Poll::Pending,
13588                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
13589                        this.is_terminated = true;
13590                        return std::task::Poll::Ready(None);
13591                    }
13592                    std::task::Poll::Ready(Err(e)) => {
13593                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
13594                            e.into(),
13595                        ))))
13596                    }
13597                }
13598
13599                // A message has been received from the channel
13600                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
13601
13602                std::task::Poll::Ready(Some(match header.ordinal {
13603                    0x11ac2555cf575b54 => {
13604                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13605                        let mut req = fidl::new_empty!(
13606                            fidl::encoding::EmptyPayload,
13607                            fidl::encoding::DefaultFuchsiaResourceDialect
13608                        );
13609                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13610                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13611                        Ok(NodeRequest::Sync {
13612                            responder: NodeSyncResponder {
13613                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13614                                tx_id: header.tx_id,
13615                            },
13616                        })
13617                    }
13618                    0x6a5cae7d6d6e04c6 => {
13619                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13620                        let mut req = fidl::new_empty!(
13621                            fidl::encoding::EmptyPayload,
13622                            fidl::encoding::DefaultFuchsiaResourceDialect
13623                        );
13624                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13625                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13626                        Ok(NodeRequest::Release { control_handle })
13627                    }
13628                    0xb41f1624f48c1e9 => {
13629                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13630                        let mut req = fidl::new_empty!(
13631                            NodeSetNameRequest,
13632                            fidl::encoding::DefaultFuchsiaResourceDialect
13633                        );
13634                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
13635                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13636                        Ok(NodeRequest::SetName { payload: req, control_handle })
13637                    }
13638                    0x5cde8914608d99b1 => {
13639                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13640                        let mut req = fidl::new_empty!(
13641                            NodeSetDebugClientInfoRequest,
13642                            fidl::encoding::DefaultFuchsiaResourceDialect
13643                        );
13644                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
13645                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13646                        Ok(NodeRequest::SetDebugClientInfo { payload: req, control_handle })
13647                    }
13648                    0x716b0af13d5c0806 => {
13649                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13650                        let mut req = fidl::new_empty!(
13651                            NodeSetDebugTimeoutLogDeadlineRequest,
13652                            fidl::encoding::DefaultFuchsiaResourceDialect
13653                        );
13654                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
13655                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13656                        Ok(NodeRequest::SetDebugTimeoutLogDeadline { payload: req, control_handle })
13657                    }
13658                    0x5209c77415b4dfad => {
13659                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13660                        let mut req = fidl::new_empty!(
13661                            fidl::encoding::EmptyPayload,
13662                            fidl::encoding::DefaultFuchsiaResourceDialect
13663                        );
13664                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13665                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13666                        Ok(NodeRequest::SetVerboseLogging { control_handle })
13667                    }
13668                    0x5b3d0e51614df053 => {
13669                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13670                        let mut req = fidl::new_empty!(
13671                            fidl::encoding::EmptyPayload,
13672                            fidl::encoding::DefaultFuchsiaResourceDialect
13673                        );
13674                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13675                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13676                        Ok(NodeRequest::GetNodeRef {
13677                            responder: NodeGetNodeRefResponder {
13678                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13679                                tx_id: header.tx_id,
13680                            },
13681                        })
13682                    }
13683                    0x3a58e00157e0825 => {
13684                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13685                        let mut req = fidl::new_empty!(
13686                            NodeIsAlternateForRequest,
13687                            fidl::encoding::DefaultFuchsiaResourceDialect
13688                        );
13689                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
13690                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13691                        Ok(NodeRequest::IsAlternateFor {
13692                            payload: req,
13693                            responder: NodeIsAlternateForResponder {
13694                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13695                                tx_id: header.tx_id,
13696                            },
13697                        })
13698                    }
13699                    0x77d19a494b78ba8c => {
13700                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13701                        let mut req = fidl::new_empty!(
13702                            fidl::encoding::EmptyPayload,
13703                            fidl::encoding::DefaultFuchsiaResourceDialect
13704                        );
13705                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13706                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13707                        Ok(NodeRequest::GetBufferCollectionId {
13708                            responder: NodeGetBufferCollectionIdResponder {
13709                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13710                                tx_id: header.tx_id,
13711                            },
13712                        })
13713                    }
13714                    0x22dd3ea514eeffe1 => {
13715                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13716                        let mut req = fidl::new_empty!(
13717                            fidl::encoding::EmptyPayload,
13718                            fidl::encoding::DefaultFuchsiaResourceDialect
13719                        );
13720                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13721                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13722                        Ok(NodeRequest::SetWeak { control_handle })
13723                    }
13724                    0x38a44fc4d7724be9 => {
13725                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13726                        let mut req = fidl::new_empty!(
13727                            NodeSetWeakOkRequest,
13728                            fidl::encoding::DefaultFuchsiaResourceDialect
13729                        );
13730                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
13731                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13732                        Ok(NodeRequest::SetWeakOk { payload: req, control_handle })
13733                    }
13734                    0x3f22f2a293d3cdac => {
13735                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13736                        let mut req = fidl::new_empty!(
13737                            NodeAttachNodeTrackingRequest,
13738                            fidl::encoding::DefaultFuchsiaResourceDialect
13739                        );
13740                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
13741                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13742                        Ok(NodeRequest::AttachNodeTracking { payload: req, control_handle })
13743                    }
13744                    _ if header.tx_id == 0
13745                        && header
13746                            .dynamic_flags()
13747                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
13748                    {
13749                        Ok(NodeRequest::_UnknownMethod {
13750                            ordinal: header.ordinal,
13751                            control_handle: NodeControlHandle { inner: this.inner.clone() },
13752                            method_type: fidl::MethodType::OneWay,
13753                        })
13754                    }
13755                    _ if header
13756                        .dynamic_flags()
13757                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
13758                    {
13759                        this.inner.send_framework_err(
13760                            fidl::encoding::FrameworkErr::UnknownMethod,
13761                            header.tx_id,
13762                            header.ordinal,
13763                            header.dynamic_flags(),
13764                            (bytes, handles),
13765                        )?;
13766                        Ok(NodeRequest::_UnknownMethod {
13767                            ordinal: header.ordinal,
13768                            control_handle: NodeControlHandle { inner: this.inner.clone() },
13769                            method_type: fidl::MethodType::TwoWay,
13770                        })
13771                    }
13772                    _ => Err(fidl::Error::UnknownOrdinal {
13773                        ordinal: header.ordinal,
13774                        protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
13775                    }),
13776                }))
13777            },
13778        )
13779    }
13780}
13781
13782/// This protocol is the parent protocol for all nodes in the tree established
13783/// by [`fuchsia.sysmem2/BufferCollectionToken`] creation and
13784/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] creation, including
13785/// [`fuchsia.sysmem2/BufferCollectionToken`](s) which have since been converted
13786/// to a [`fuchsia.sysmem2/BufferCollection`] channel.
13787///
13788/// Epitaphs are not used in this protocol.
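///
/// A minimal serving sketch, assuming a `NodeRequestStream` obtained elsewhere in
/// this file; only a couple of variants are handled and the rest are elided:
///
/// ```ignore
/// use futures::TryStreamExt;
///
/// async fn serve_node(mut stream: NodeRequestStream) -> Result<(), fidl::Error> {
///     while let Some(request) = stream.try_next().await? {
///         match request {
///             NodeRequest::Sync { responder } => {
///                 // Sync has no payload; an empty reply unblocks the client.
///                 responder.send()?;
///             }
///             NodeRequest::Release { control_handle: _ } => {
///                 // Clean close requested; tear down per-node state here.
///             }
///             _ => {}
///         }
///     }
///     Ok(())
/// }
/// ```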
13789#[derive(Debug)]
13790pub enum NodeRequest {
13791    /// Ensure that previous messages have been received server side. This is
13792    /// particularly useful after previous messages that created new tokens,
13793    /// because a token must be known to the sysmem server before sending the
13794    /// token to another participant.
13795    ///
13796    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
13797    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
13798    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
13799    /// to mitigate the possibility of a hostile/fake
13800    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
13801    /// Another way is to pass the token to
13802    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
13803    /// the token as part of exchanging it for a
13804    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
13805    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
13806    /// of stalling.
13807    ///
13808    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
13809    /// and then starting and completing a `Sync`, it's then safe to send the
13810    /// `BufferCollectionToken` client ends to other participants knowing the
13811    /// server will recognize the tokens when they're sent by the other
13812    /// participants to sysmem in a
13813    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
13814    /// efficient way to create tokens while avoiding unnecessary round trips.
13815    ///
13816    /// Other options include waiting for each
13817    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
13818    /// individually (using separate call to `Sync` after each), or calling
13819    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
13820    /// converted to a `BufferCollection` via
13821    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
13822    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
13823    /// the sync step and can create multiple tokens at once.
13824    Sync { responder: NodeSyncResponder },
13825    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
13826    ///
13827    /// Normally a participant will convert a `BufferCollectionToken` into a
13828    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
13829    /// `Release` via the token (and then close the channel immediately or
13830    /// shortly later in response to the server closing the server end), which
13831    /// avoids causing buffer collection failure. Without a prior `Release`,
13832    /// closing the `BufferCollectionToken` client end will cause buffer
13833    /// collection failure.
13834    ///
13835    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
13836    ///
13837    /// By default the server handles unexpected closure of a
13838    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
13839    /// first) by failing the buffer collection. Partly this is to expedite
13840    /// closing VMO handles to reclaim memory when any participant fails. If a
13841    /// participant would like to cleanly close a `BufferCollection` without
13842    /// causing buffer collection failure, the participant can send `Release`
13843    /// before closing the `BufferCollection` client end. The `Release` can
13844    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
13845    /// buffer collection won't require constraints from this node in order to
13846    /// allocate. If after `SetConstraints`, the constraints are retained and
13847    /// aggregated, despite the lack of `BufferCollection` connection at the
13848    /// time of constraints aggregation.
13849    ///
13850    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
13851    ///
13852    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
13853    /// end (without `Release` first) will trigger failure of the buffer
13854    /// collection. To close a `BufferCollectionTokenGroup` channel without
13855    /// failing the buffer collection, ensure that AllChildrenPresent() has been
13856    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
13857    /// client end.
13858    ///
13859    /// If `Release` occurs before
13860    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
13861    /// buffer collection will fail (triggered by reception of `Release` without
13862    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
13863    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
13864    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
13865    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
13866    /// close requires `AllChildrenPresent` (if not already sent), then
13867    /// `Release`, then close client end.
13868    ///
13869    /// If `Release` occurs after `AllChildrenPresent`, the children and all
13870    /// their constraints remain intact (just as they would if the
13871    /// `BufferCollectionTokenGroup` channel had remained open), and the client
13872    /// end close doesn't trigger buffer collection failure.
13873    ///
13874    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
13875    ///
13876    /// For brevity, the per-channel-protocol paragraphs above ignore the
13877    /// separate failure domain created by
13878    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
13879    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
13880    /// unexpectedly closes (without `Release` first) and that client end is
13881    /// under a failure domain, instead of failing the whole buffer collection,
13882    /// the failure domain is failed, but the buffer collection itself is
13883    /// isolated from failure of the failure domain. Such failure domains can be
13884    /// nested, in which case only the inner-most failure domain in which the
13885    /// `Node` resides fails.
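    ///
    /// A minimal clean-close sketch from the client side, assuming a
    /// `BufferCollectionProxy` generated elsewhere in this file (the proxy method
    /// name is an assumption based on the usual fidlgen naming):
    ///
    /// ```ignore
    /// fn clean_close(collection_proxy: BufferCollectionProxy) -> Result<(), fidl::Error> {
    ///     // Send Release first so closing the client end doesn't fail the collection.
    ///     collection_proxy.release()?;
    ///     drop(collection_proxy);
    ///     Ok(())
    /// }
    /// ```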
13886    Release { control_handle: NodeControlHandle },
13887    /// Set a name for VMOs in this buffer collection.
13888    ///
13889    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
13890    /// will be truncated to fit. The name of the vmo will be suffixed with the
13891    /// buffer index within the collection (if the suffix fits within
13892    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
13893    /// listed in the inspect data.
13894    ///
13895    /// The name only affects VMOs allocated after the name is set; this call
13896    /// does not rename existing VMOs. If multiple clients set different names
13897    /// then the larger priority value will win. Setting a new name with the
13898    /// same priority as a prior name doesn't change the name.
13899    ///
13900    /// All table fields are currently required.
13901    ///
13902    /// + request `priority` The name is only set if this is the first `SetName`
13903    ///   or if `priority` is greater than any previous `priority` value in
13904    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
13905    /// + request `name` The name for VMOs created under this buffer collection.
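    ///
    /// A minimal handling sketch; the `priority` and `name` field names on
    /// [`NodeSetNameRequest`] are assumptions based on the request documentation
    /// above:
    ///
    /// ```ignore
    /// fn handle_set_name(payload: NodeSetNameRequest) {
    ///     if let (Some(priority), Some(name)) = (payload.priority, payload.name) {
    ///         // Apply `name` to VMOs allocated from here on, keeping only the
    ///         // highest `priority` seen so far.
    ///         let _ = (priority, name);
    ///     }
    /// }
    /// ```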
13906    SetName { payload: NodeSetNameRequest, control_handle: NodeControlHandle },
13907    /// Set information about the current client that can be used by sysmem to
13908    /// help diagnose leaking memory and allocation stalls waiting for a
13909    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
13910    ///
13911    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
13912    /// `Node`(s) derived from this `Node`, unless overridden by
13913    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
13914    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
13915    ///
13916    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
13917    /// `Allocator` is the most efficient way to ensure that all
13918    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
13919    /// set, and is also more efficient than separately sending the same debug
13920    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
13921    /// created [`fuchsia.sysmem2/Node`].
13922    ///
13923    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
13924    /// indicate which client is closing their channel first, leading to subtree
13925    /// failure (which can be normal if the purpose of the subtree is over, but
13926    /// if happening earlier than expected, the client-channel-specific name can
13927    /// help diagnose where the failure is first coming from, from sysmem's
13928    /// point of view).
13929    ///
13930    /// All table fields are currently required.
13931    ///
13932    /// + request `name` This can be an arbitrary string, but the current
13933    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
13934    /// + request `id` This can be an arbitrary id, but the current process ID
13935    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
13936    SetDebugClientInfo { payload: NodeSetDebugClientInfoRequest, control_handle: NodeControlHandle },
13937    /// Sysmem logs a warning if sysmem hasn't seen
13938    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
13939    /// within 5 seconds after creation of a new collection.
13940    ///
13941    /// Clients can call this method to change when the log is printed. If
13942    /// multiple clients set the deadline, it's unspecified which deadline will
13943    /// take effect.
13944    ///
13945    /// In most cases the default works well.
13946    ///
13947    /// All table fields are currently required.
13948    ///
13949    /// + request `deadline` The time at which sysmem will start trying to log
13950    ///   the warning, unless all constraints are with sysmem by then.
13951    SetDebugTimeoutLogDeadline {
13952        payload: NodeSetDebugTimeoutLogDeadlineRequest,
13953        control_handle: NodeControlHandle,
13954    },
13955    /// This enables verbose logging for the buffer collection.
13956    ///
13957    /// Verbose logging includes constraints set via
13958    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
13959    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
13960    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
13961    /// the tree of `Node`(s).
13962    ///
13963    /// Normally sysmem prints only a single line complaint when aggregation
13964    /// fails, with just the specific detailed reason that aggregation failed,
13965    /// with little surrounding context.  While this is often enough to diagnose
13966    /// a problem if only a small change was made and everything was working
13967    /// before the small change, it's often not particularly helpful for getting
13968    /// a new buffer collection to work for the first time.  Especially with
13969    /// more complex trees of nodes, involving things like
13970    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
13971    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
13972    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
13973    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
13974    /// looks like and why it's failing a logical allocation, or why a tree or
13975    /// subtree is failing sooner than expected.
13976    ///
13977    /// The intent of the extra logging is to be acceptable from a performance
13978    /// point of view, under the assumption that verbose logging is only enabled
13979    /// on a low number of buffer collections. If we're not tracking down a bug,
13980    /// we shouldn't send this message.
13981    SetVerboseLogging { control_handle: NodeControlHandle },
13982    /// This gets a handle that can be used as a parameter to
13983    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
13984    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
13985    /// client obtained this handle from this `Node`.
13986    ///
13987    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
13988    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
13989    /// despite the two calls typically being on different channels.
13990    ///
13991    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
13992    ///
13993    /// All table fields are currently required.
13994    ///
13995    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
13996    ///   different `Node` channel, to prove that the client obtained the handle
13997    ///   from this `Node`.
13998    GetNodeRef { responder: NodeGetNodeRefResponder },
13999    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
14000    /// rooted at a different child token of a common parent
14001    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
14002    /// passed-in `node_ref`.
14003    ///
14004    /// This call is for assisting with admission control de-duplication, and
14005    /// with debugging.
14006    ///
14007    /// The `node_ref` must be obtained using
14008    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
14009    ///
14010    /// The `node_ref` can be a duplicated handle; it's not necessary to call
14011    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
14012    ///
14013    /// If a calling token may not actually be a valid token at all due to a
14014    /// potentially hostile/untrusted provider of the token, call
14015    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
14016    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
14017    /// never responds due to a calling token not being a real token (not really
14018    /// talking to sysmem).  Another option is to call
14019    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
14020    /// which also validates the token along with converting it to a
14021    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
14022    ///
14023    /// All table fields are currently required.
14024    ///
14025    /// - response `is_alternate`
14026    ///   - true: The first parent node in common between the calling node and
14027    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
14028    ///     that the calling `Node` and the `node_ref` `Node` will not have both
14029    ///     their constraints apply - rather sysmem will choose one or the other
14030    ///     of the constraints - never both.  This is because only one child of
14031    ///     a `BufferCollectionTokenGroup` is selected during logical
14032    ///     allocation, with only that one child's subtree contributing to
14033    ///     constraints aggregation.
14034    ///   - false: The first parent node in common between the calling `Node`
14035    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
14036    ///     Currently, this means the first parent node in common is a
14037    ///     `BufferCollectionToken` or `BufferCollection` (regardless of whether
14038    ///     it has been `Release`ed).  This means that the calling `Node` and the `node_ref`
14039    ///     `Node` may have both their constraints apply during constraints
14040    ///     aggregation of the logical allocation, if both `Node`(s) are
14041    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
14042    ///     this case, there is no `BufferCollectionTokenGroup` that will
14043    ///     directly prevent the two `Node`(s) from both being selected and
14044    ///     their constraints both aggregated, but even when false, one or both
14045    ///     `Node`(s) may still be eliminated from consideration if one or both
14046    ///     `Node`(s) has a direct or indirect parent
14047    ///     `BufferCollectionTokenGroup` which selects a child subtree other
14048    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
14049    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
14050    ///   associated with the same buffer collection as the calling `Node`.
14051    ///   Another reason for this error is if the `node_ref` is an
14052    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
14053    ///   a real `node_ref` obtained from `GetNodeRef`.
14054    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
14055    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
14056    ///   the needed rights expected on a real `node_ref`.
14057    /// * No other failing status codes are returned by this call.  However,
14058    ///   sysmem may add additional codes in future, so the client should have
14059    ///   sensible default handling for any failing status code.
14060    IsAlternateFor { payload: NodeIsAlternateForRequest, responder: NodeIsAlternateForResponder },
14061    /// Get the buffer collection ID. This ID is also available from
14062    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
14063    /// within the collection).
14064    ///
14065    /// This call is mainly useful in situations where we can't convey a
14066    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
14067    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
14068    /// handle, which can be joined back up with a `BufferCollection` client end
14069    /// that was created via a different path. Prefer to convey a
14070    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
14071    ///
14072    /// Trusting a `buffer_collection_id` value from a source other than sysmem
14073    /// is analogous to trusting a koid value from a source other than zircon.
14074    /// Both should be avoided unless really necessary, and both require
14075    /// caution. In some situations it may be reasonable to refer to a
14076    /// pre-established `BufferCollection` by `buffer_collection_id` via a
14077    /// protocol for efficiency reasons, but an incoming value purporting to be
14078    /// a `buffer_collection_id` is not sufficient alone to justify granting the
14079    /// sender of the `buffer_collection_id` any capability. The sender must
14080    /// first prove to a receiver that the sender has/had a VMO or has/had a
14081    /// `BufferCollectionToken` to the same collection by sending a handle that
14082    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
14083    /// `buffer_collection_id` value. The receiver should take care to avoid
14084    /// assuming that a sender had a `BufferCollectionToken` in cases where the
14085    /// sender has only proven that the sender had a VMO.
14086    ///
14087    /// - response `buffer_collection_id` This ID is unique per buffer
14088    ///   collection per boot. Each buffer is uniquely identified by the
14089    ///   `buffer_collection_id` and `buffer_index` together.
14090    GetBufferCollectionId { responder: NodeGetBufferCollectionIdResponder },
14091    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
14092    /// created after this message to weak, which means that a client's `Node`
14093    /// client end (or a child created after this message) is not alone
14094    /// sufficient to keep allocated VMOs alive.
14095    ///
14096    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
14097    /// `close_weak_asap`.
14098    ///
14099    /// This message is only permitted before the `Node` becomes ready for
14100    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
14101    ///   * `BufferCollectionToken`: any time
14102    ///   * `BufferCollection`: before `SetConstraints`
14103    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
14104    ///
14105    /// Currently, no conversion from strong `Node` to weak `Node` after ready
14106    /// for allocation is provided, but a client can simulate that by creating
14107    /// an additional `Node` before allocation and setting that additional
14108    /// `Node` to weak, and then potentially at some point later sending
14109    /// `Release` and closing the client end of the client's strong `Node`, but
14110    /// keeping the client's weak `Node`.
14111    ///
14112    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
14113    /// collection failure (all `Node` client end(s) will see
14114    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
14115    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
14116    /// this situation until all `Node`(s) are ready for allocation. For initial
14117    /// allocation to succeed, at least one strong `Node` is required to exist
14118    /// at allocation time, but after that client receives VMO handles, that
14119    /// client can `BufferCollection.Release` and close the client end without
14120    /// causing this type of failure.
14121    ///
14122    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
14123    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
14124    /// separately as appropriate.
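    ///
    /// A minimal enforcement sketch from the server side; `ready_for_allocation` is
    /// a hypothetical piece of server state:
    ///
    /// ```ignore
    /// use fidl::endpoints::ControlHandle as _;
    ///
    /// fn handle_set_weak(ready_for_allocation: bool, control_handle: NodeControlHandle) {
    ///     if ready_for_allocation {
    ///         // SetWeak after ready-for-allocation violates the contract above.
    ///         control_handle.shutdown_with_epitaph(zx_status::Status::BAD_STATE);
    ///     } else {
    ///         // Mark this node (and children created after this point) weak.
    ///     }
    /// }
    /// ```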
14125    SetWeak { control_handle: NodeControlHandle },
14126    /// This indicates to sysmem that the client is prepared to pay attention to
14127    /// `close_weak_asap`.
14128    ///
14129    /// If sent, this message must be before
14130    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
14131    ///
14132    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
14133    /// send this message before `WaitForAllBuffersAllocated`, or a parent
14134    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
14135    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
14136    /// trigger buffer collection failure.
14137    ///
14138    /// This message is necessary because weak sysmem VMOs have not always been
14139    /// a thing, so older clients are not aware of the need to pay attention to
14140    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
14141    /// sysmem weak VMO handles asap. By having this message and requiring
14142    /// participants to indicate their acceptance of this aspect of the overall
14143    /// protocol, we avoid situations where an older client is delivered a weak
14144    /// VMO without any way for sysmem to get that VMO to close quickly later
14145    /// (and on a per-buffer basis).
14146    ///
14147    /// A participant that doesn't handle `close_weak_asap` and also doesn't
14148    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
14149    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
14150    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
14151    /// same participant has a child/delegate which does retrieve VMOs, that
14152    /// child/delegate will need to send `SetWeakOk` before
14153    /// `WaitForAllBuffersAllocated`.
14154    ///
14155    /// + request `for_child_nodes_also` If present and true, this means direct
14156    ///   child nodes of this node created after this message plus all
14157    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
14158    ///   those nodes. Any child node of this node that was created before this
14159    ///   message is not included. This setting is "sticky" in the sense that a
14160    ///   subsequent `SetWeakOk` without this bool set to true does not reset
14161    ///   the server-side bool. If this creates a problem for a participant, a
14162    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
14163    ///   tokens instead, as appropriate. A participant should only set
14164    ///   `for_child_nodes_also` true if the participant can really promise to
14165    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
14166    ///   weak VMO handles held by participants holding the corresponding child
14167    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
14168    ///   which are using sysmem(1) can be weak, despite the clients of those
14169    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
14170    ///   direct way to find out about `close_weak_asap`. This only applies to
14171    ///   descendants of this `Node` which are using sysmem(1), not to this
14172    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
14173    ///   token, which will fail allocation unless an ancestor of this `Node`
14174    ///   specified `for_child_nodes_also` true.
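    ///
    /// A minimal construction sketch; the `for_child_nodes_also` field name is taken
    /// from the request documentation above:
    ///
    /// ```ignore
    /// fn weak_ok_for_children() -> NodeSetWeakOkRequest {
    ///     NodeSetWeakOkRequest { for_child_nodes_also: Some(true), ..Default::default() }
    /// }
    /// ```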
14175    SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: NodeControlHandle },
14176    /// The server_end will be closed after this `Node` and any child nodes have
14177    /// released their buffer counts, making those counts available for
14178    /// reservation by a different `Node` via
14179    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
14180    ///
14181    /// The `Node` buffer counts may not be released until the entire tree of
14182    /// `Node`(s) is closed or failed, because
14183    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
14184    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
14185    /// `Node` buffer counts remain reserved until the orphaned node is later
14186    /// cleaned up.
14187    ///
14188    /// If the `Node` exceeds a fairly large number of attached eventpair server
14189    /// ends, a log message will indicate this and the `Node` (and the
14190    /// appropriate sub-tree) will fail.
14191    ///
14192    /// The `server_end` will remain open when
14193    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
14194    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
14195    /// [`fuchsia.sysmem2/BufferCollection`].
14196    ///
14197    /// This message can also be used with a
14198    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
14199    AttachNodeTracking { payload: NodeAttachNodeTrackingRequest, control_handle: NodeControlHandle },
14200    /// An interaction was received which does not match any known method.
14201    #[non_exhaustive]
14202    _UnknownMethod {
14203        /// Ordinal of the method that was called.
14204        ordinal: u64,
14205        control_handle: NodeControlHandle,
14206        method_type: fidl::MethodType,
14207    },
14208}
14209
14210impl NodeRequest {
14211    #[allow(irrefutable_let_patterns)]
14212    pub fn into_sync(self) -> Option<(NodeSyncResponder)> {
14213        if let NodeRequest::Sync { responder } = self {
14214            Some((responder))
14215        } else {
14216            None
14217        }
14218    }
14219
14220    #[allow(irrefutable_let_patterns)]
14221    pub fn into_release(self) -> Option<(NodeControlHandle)> {
14222        if let NodeRequest::Release { control_handle } = self {
14223            Some((control_handle))
14224        } else {
14225            None
14226        }
14227    }
14228
14229    #[allow(irrefutable_let_patterns)]
14230    pub fn into_set_name(self) -> Option<(NodeSetNameRequest, NodeControlHandle)> {
14231        if let NodeRequest::SetName { payload, control_handle } = self {
14232            Some((payload, control_handle))
14233        } else {
14234            None
14235        }
14236    }
14237
14238    #[allow(irrefutable_let_patterns)]
14239    pub fn into_set_debug_client_info(
14240        self,
14241    ) -> Option<(NodeSetDebugClientInfoRequest, NodeControlHandle)> {
14242        if let NodeRequest::SetDebugClientInfo { payload, control_handle } = self {
14243            Some((payload, control_handle))
14244        } else {
14245            None
14246        }
14247    }
14248
14249    #[allow(irrefutable_let_patterns)]
14250    pub fn into_set_debug_timeout_log_deadline(
14251        self,
14252    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, NodeControlHandle)> {
14253        if let NodeRequest::SetDebugTimeoutLogDeadline { payload, control_handle } = self {
14254            Some((payload, control_handle))
14255        } else {
14256            None
14257        }
14258    }
14259
14260    #[allow(irrefutable_let_patterns)]
14261    pub fn into_set_verbose_logging(self) -> Option<(NodeControlHandle)> {
14262        if let NodeRequest::SetVerboseLogging { control_handle } = self {
14263            Some((control_handle))
14264        } else {
14265            None
14266        }
14267    }
14268
14269    #[allow(irrefutable_let_patterns)]
14270    pub fn into_get_node_ref(self) -> Option<(NodeGetNodeRefResponder)> {
14271        if let NodeRequest::GetNodeRef { responder } = self {
14272            Some((responder))
14273        } else {
14274            None
14275        }
14276    }
14277
14278    #[allow(irrefutable_let_patterns)]
14279    pub fn into_is_alternate_for(
14280        self,
14281    ) -> Option<(NodeIsAlternateForRequest, NodeIsAlternateForResponder)> {
14282        if let NodeRequest::IsAlternateFor { payload, responder } = self {
14283            Some((payload, responder))
14284        } else {
14285            None
14286        }
14287    }
14288
14289    #[allow(irrefutable_let_patterns)]
14290    pub fn into_get_buffer_collection_id(self) -> Option<(NodeGetBufferCollectionIdResponder)> {
14291        if let NodeRequest::GetBufferCollectionId { responder } = self {
14292            Some((responder))
14293        } else {
14294            None
14295        }
14296    }
14297
14298    #[allow(irrefutable_let_patterns)]
14299    pub fn into_set_weak(self) -> Option<(NodeControlHandle)> {
14300        if let NodeRequest::SetWeak { control_handle } = self {
14301            Some((control_handle))
14302        } else {
14303            None
14304        }
14305    }
14306
14307    #[allow(irrefutable_let_patterns)]
14308    pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, NodeControlHandle)> {
14309        if let NodeRequest::SetWeakOk { payload, control_handle } = self {
14310            Some((payload, control_handle))
14311        } else {
14312            None
14313        }
14314    }
14315
14316    #[allow(irrefutable_let_patterns)]
14317    pub fn into_attach_node_tracking(
14318        self,
14319    ) -> Option<(NodeAttachNodeTrackingRequest, NodeControlHandle)> {
14320        if let NodeRequest::AttachNodeTracking { payload, control_handle } = self {
14321            Some((payload, control_handle))
14322        } else {
14323            None
14324        }
14325    }
14326
14327    /// Name of the method defined in FIDL
14328    pub fn method_name(&self) -> &'static str {
14329        match *self {
14330            NodeRequest::Sync { .. } => "sync",
14331            NodeRequest::Release { .. } => "release",
14332            NodeRequest::SetName { .. } => "set_name",
14333            NodeRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
14334            NodeRequest::SetDebugTimeoutLogDeadline { .. } => "set_debug_timeout_log_deadline",
14335            NodeRequest::SetVerboseLogging { .. } => "set_verbose_logging",
14336            NodeRequest::GetNodeRef { .. } => "get_node_ref",
14337            NodeRequest::IsAlternateFor { .. } => "is_alternate_for",
14338            NodeRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
14339            NodeRequest::SetWeak { .. } => "set_weak",
14340            NodeRequest::SetWeakOk { .. } => "set_weak_ok",
14341            NodeRequest::AttachNodeTracking { .. } => "attach_node_tracking",
14342            NodeRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
14343                "unknown one-way method"
14344            }
14345            NodeRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
14346                "unknown two-way method"
14347            }
14348        }
14349    }
14350}
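// A minimal dispatch sketch using the `into_*` accessors above; each accessor
// consumes the request and returns `Some(..)` only for its own variant:
//
//     if let Some(responder) = request.into_sync() {
//         responder.send()?;
//     }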
14351
14352#[derive(Debug, Clone)]
14353pub struct NodeControlHandle {
14354    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
14355}
14356
14357impl fidl::endpoints::ControlHandle for NodeControlHandle {
14358    fn shutdown(&self) {
14359        self.inner.shutdown()
14360    }
14361    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
14362        self.inner.shutdown_with_epitaph(status)
14363    }
14364
14365    fn is_closed(&self) -> bool {
14366        self.inner.channel().is_closed()
14367    }
14368    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
14369        self.inner.channel().on_closed()
14370    }
14371
14372    #[cfg(target_os = "fuchsia")]
14373    fn signal_peer(
14374        &self,
14375        clear_mask: zx::Signals,
14376        set_mask: zx::Signals,
14377    ) -> Result<(), zx_status::Status> {
14378        use fidl::Peered;
14379        self.inner.channel().signal_peer(clear_mask, set_mask)
14380    }
14381}
14382
14383impl NodeControlHandle {}
14384
14385#[must_use = "FIDL methods require a response to be sent"]
14386#[derive(Debug)]
14387pub struct NodeSyncResponder {
14388    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
14389    tx_id: u32,
14390}
14391
14392/// Sets the channel to be shut down (see [`NodeControlHandle::shutdown`])
14393/// if the responder is dropped without sending a response, so that the client
14394/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
14395impl std::ops::Drop for NodeSyncResponder {
14396    fn drop(&mut self) {
14397        self.control_handle.shutdown();
14398        // Safety: drops once, never accessed again
14399        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
14400    }
14401}
14402
14403impl fidl::endpoints::Responder for NodeSyncResponder {
14404    type ControlHandle = NodeControlHandle;
14405
14406    fn control_handle(&self) -> &NodeControlHandle {
14407        &self.control_handle
14408    }
14409
14410    fn drop_without_shutdown(mut self) {
14411        // Safety: drops once, never accessed again due to mem::forget
14412        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
14413        // Prevent Drop from running (which would shut down the channel)
14414        std::mem::forget(self);
14415    }
14416}
14417
14418impl NodeSyncResponder {
14419    /// Sends a response to the FIDL transaction.
14420    ///
14421    /// Sets the channel to shut down if an error occurs.
14422    pub fn send(self) -> Result<(), fidl::Error> {
14423        let _result = self.send_raw();
14424        if _result.is_err() {
14425            self.control_handle.shutdown();
14426        }
14427        self.drop_without_shutdown();
14428        _result
14429    }
14430
14431    /// Similar to "send" but does not shut down the channel if an error occurs.
14432    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
14433        let _result = self.send_raw();
14434        self.drop_without_shutdown();
14435        _result
14436    }
14437
14438    fn send_raw(&self) -> Result<(), fidl::Error> {
14439        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
14440            fidl::encoding::Flexible::new(()),
14441            self.tx_id,
14442            0x11ac2555cf575b54,
14443            fidl::encoding::DynamicFlags::FLEXIBLE,
14444        )
14445    }
14446}
14447
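/// Responder for [`NodeRequest::GetNodeRef`].
///
/// A minimal reply sketch; the `node_ref` field name on [`NodeGetNodeRefResponse`]
/// is taken from the request documentation, and `node_ref_event` stands in for an
/// event created or duplicated by the server:
///
/// ```ignore
/// fn reply_get_node_ref(
///     responder: NodeGetNodeRefResponder,
///     node_ref_event: fidl::Event,
/// ) -> Result<(), fidl::Error> {
///     responder.send(NodeGetNodeRefResponse {
///         node_ref: Some(node_ref_event),
///         ..Default::default()
///     })
/// }
/// ```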
14448#[must_use = "FIDL methods require a response to be sent"]
14449#[derive(Debug)]
14450pub struct NodeGetNodeRefResponder {
14451    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
14452    tx_id: u32,
14453}
14454
14455/// Sets the channel to be shut down (see [`NodeControlHandle::shutdown`])
14456/// if the responder is dropped without sending a response, so that the client
14457/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
14458impl std::ops::Drop for NodeGetNodeRefResponder {
14459    fn drop(&mut self) {
14460        self.control_handle.shutdown();
14461        // Safety: drops once, never accessed again
14462        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
14463    }
14464}
14465
14466impl fidl::endpoints::Responder for NodeGetNodeRefResponder {
14467    type ControlHandle = NodeControlHandle;
14468
14469    fn control_handle(&self) -> &NodeControlHandle {
14470        &self.control_handle
14471    }
14472
14473    fn drop_without_shutdown(mut self) {
14474        // Safety: drops once, never accessed again due to mem::forget
14475        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
14476        // Prevent Drop from running (which would shut down the channel)
14477        std::mem::forget(self);
14478    }
14479}
14480
14481impl NodeGetNodeRefResponder {
14482    /// Sends a response to the FIDL transaction.
14483    ///
14484    /// Sets the channel to shut down if an error occurs.
14485    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
14486        let _result = self.send_raw(payload);
14487        if _result.is_err() {
14488            self.control_handle.shutdown();
14489        }
14490        self.drop_without_shutdown();
14491        _result
14492    }
14493
14494    /// Similar to "send" but does not shut down the channel if an error occurs.
14495    pub fn send_no_shutdown_on_err(
14496        self,
14497        mut payload: NodeGetNodeRefResponse,
14498    ) -> Result<(), fidl::Error> {
14499        let _result = self.send_raw(payload);
14500        self.drop_without_shutdown();
14501        _result
14502    }
14503
14504    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
14505        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
14506            fidl::encoding::Flexible::new(&mut payload),
14507            self.tx_id,
14508            0x5b3d0e51614df053,
14509            fidl::encoding::DynamicFlags::FLEXIBLE,
14510        )
14511    }
14512}
14513
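/// Responder for [`NodeRequest::IsAlternateFor`].
///
/// A minimal reply sketch; the `is_alternate` field name and the `Error::NotFound`
/// variant spelling are assumptions based on the request documentation above:
///
/// ```ignore
/// fn reply_is_alternate_for(
///     responder: NodeIsAlternateForResponder,
///     known_node_ref: bool,
///     is_alternate: bool,
/// ) -> Result<(), fidl::Error> {
///     if known_node_ref {
///         responder.send(Ok(&NodeIsAlternateForResponse {
///             is_alternate: Some(is_alternate),
///             ..Default::default()
///         }))
///     } else {
///         responder.send(Err(Error::NotFound))
///     }
/// }
/// ```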
14514#[must_use = "FIDL methods require a response to be sent"]
14515#[derive(Debug)]
14516pub struct NodeIsAlternateForResponder {
14517    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
14518    tx_id: u32,
14519}
14520
14521/// Sets the channel to be shut down (see [`NodeControlHandle::shutdown`])
14522/// if the responder is dropped without sending a response, so that the client
14523/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
14524impl std::ops::Drop for NodeIsAlternateForResponder {
14525    fn drop(&mut self) {
14526        self.control_handle.shutdown();
14527        // Safety: drops once, never accessed again
14528        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
14529    }
14530}
14531
14532impl fidl::endpoints::Responder for NodeIsAlternateForResponder {
14533    type ControlHandle = NodeControlHandle;
14534
14535    fn control_handle(&self) -> &NodeControlHandle {
14536        &self.control_handle
14537    }
14538
14539    fn drop_without_shutdown(mut self) {
14540        // Safety: drops once, never accessed again due to mem::forget
14541        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
14542        // Prevent Drop from running (which would shut down the channel)
14543        std::mem::forget(self);
14544    }
14545}
14546
14547impl NodeIsAlternateForResponder {
14548    /// Sends a response to the FIDL transaction.
14549    ///
14550    /// Sets the channel to shut down if an error occurs.
14551    pub fn send(
14552        self,
14553        mut result: Result<&NodeIsAlternateForResponse, Error>,
14554    ) -> Result<(), fidl::Error> {
14555        let _result = self.send_raw(result);
14556        if _result.is_err() {
14557            self.control_handle.shutdown();
14558        }
14559        self.drop_without_shutdown();
14560        _result
14561    }
14562
14563    /// Similar to "send" but does not shut down the channel if an error occurs.
14564    pub fn send_no_shutdown_on_err(
14565        self,
14566        mut result: Result<&NodeIsAlternateForResponse, Error>,
14567    ) -> Result<(), fidl::Error> {
14568        let _result = self.send_raw(result);
14569        self.drop_without_shutdown();
14570        _result
14571    }
14572
14573    fn send_raw(
14574        &self,
14575        mut result: Result<&NodeIsAlternateForResponse, Error>,
14576    ) -> Result<(), fidl::Error> {
14577        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
14578            NodeIsAlternateForResponse,
14579            Error,
14580        >>(
14581            fidl::encoding::FlexibleResult::new(result),
14582            self.tx_id,
14583            0x3a58e00157e0825,
14584            fidl::encoding::DynamicFlags::FLEXIBLE,
14585        )
14586    }
14587}
14588
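/// Responder for [`NodeRequest::GetBufferCollectionId`].
///
/// A minimal reply sketch; the `buffer_collection_id` field name is taken from the
/// request documentation above:
///
/// ```ignore
/// fn reply_buffer_collection_id(
///     responder: NodeGetBufferCollectionIdResponder,
///     id: u64,
/// ) -> Result<(), fidl::Error> {
///     responder.send(&NodeGetBufferCollectionIdResponse {
///         buffer_collection_id: Some(id),
///         ..Default::default()
///     })
/// }
/// ```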
14589#[must_use = "FIDL methods require a response to be sent"]
14590#[derive(Debug)]
14591pub struct NodeGetBufferCollectionIdResponder {
14592    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
14593    tx_id: u32,
14594}
14595
14596/// Sets the channel to be shut down (see [`NodeControlHandle::shutdown`])
14597/// if the responder is dropped without sending a response, so that the client
14598/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
14599impl std::ops::Drop for NodeGetBufferCollectionIdResponder {
14600    fn drop(&mut self) {
14601        self.control_handle.shutdown();
14602        // Safety: drops once, never accessed again
14603        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
14604    }
14605}
14606
14607impl fidl::endpoints::Responder for NodeGetBufferCollectionIdResponder {
14608    type ControlHandle = NodeControlHandle;
14609
14610    fn control_handle(&self) -> &NodeControlHandle {
14611        &self.control_handle
14612    }
14613
14614    fn drop_without_shutdown(mut self) {
14615        // Safety: drops once, never accessed again due to mem::forget
14616        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
14617        // Prevent Drop from running (which would shut down the channel)
14618        std::mem::forget(self);
14619    }
14620}
14621
14622impl NodeGetBufferCollectionIdResponder {
14623    /// Sends a response to the FIDL transaction.
14624    ///
14625    /// Sets the channel to shut down if an error occurs.
14626    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
14627        let _result = self.send_raw(payload);
14628        if _result.is_err() {
14629            self.control_handle.shutdown();
14630        }
14631        self.drop_without_shutdown();
14632        _result
14633    }
14634
14635    /// Similar to "send" but does not shut down the channel if an error occurs.
14636    pub fn send_no_shutdown_on_err(
14637        self,
14638        mut payload: &NodeGetBufferCollectionIdResponse,
14639    ) -> Result<(), fidl::Error> {
14640        let _result = self.send_raw(payload);
14641        self.drop_without_shutdown();
14642        _result
14643    }
14644
14645    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
14646        self.control_handle
14647            .inner
14648            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
14649                fidl::encoding::Flexible::new(payload),
14650                self.tx_id,
14651                0x77d19a494b78ba8c,
14652                fidl::encoding::DynamicFlags::FLEXIBLE,
14653            )
14654    }
14655}
14656
14657#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
14658pub struct SecureMemMarker;
14659
14660impl fidl::endpoints::ProtocolMarker for SecureMemMarker {
14661    type Proxy = SecureMemProxy;
14662    type RequestStream = SecureMemRequestStream;
14663    #[cfg(target_os = "fuchsia")]
14664    type SynchronousProxy = SecureMemSynchronousProxy;
14665
14666    const DEBUG_NAME: &'static str = "(anonymous) SecureMem";
14667}
14668pub type SecureMemGetPhysicalSecureHeapsResult =
14669    Result<SecureMemGetPhysicalSecureHeapsResponse, Error>;
14670pub type SecureMemGetDynamicSecureHeapsResult =
14671    Result<SecureMemGetDynamicSecureHeapsResponse, Error>;
14672pub type SecureMemGetPhysicalSecureHeapPropertiesResult =
14673    Result<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>;
14674pub type SecureMemAddSecureHeapPhysicalRangeResult = Result<(), Error>;
14675pub type SecureMemDeleteSecureHeapPhysicalRangeResult = Result<(), Error>;
14676pub type SecureMemModifySecureHeapPhysicalRangeResult = Result<(), Error>;
14677pub type SecureMemZeroSubRangeResult = Result<(), Error>;
14678
14679pub trait SecureMemProxyInterface: Send + Sync {
14680    type GetPhysicalSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error>>
14681        + Send;
14682    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut;
14683    type GetDynamicSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error>>
14684        + Send;
14685    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut;
14686    type GetPhysicalSecureHeapPropertiesResponseFut: std::future::Future<
14687            Output = Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error>,
14688        > + Send;
14689    fn r#get_physical_secure_heap_properties(
14690        &self,
14691        payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
14692    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut;
14693    type AddSecureHeapPhysicalRangeResponseFut: std::future::Future<Output = Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error>>
14694        + Send;
14695    fn r#add_secure_heap_physical_range(
14696        &self,
14697        payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
14698    ) -> Self::AddSecureHeapPhysicalRangeResponseFut;
14699    type DeleteSecureHeapPhysicalRangeResponseFut: std::future::Future<
14700            Output = Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error>,
14701        > + Send;
14702    fn r#delete_secure_heap_physical_range(
14703        &self,
14704        payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
14705    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut;
14706    type ModifySecureHeapPhysicalRangeResponseFut: std::future::Future<
14707            Output = Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error>,
14708        > + Send;
14709    fn r#modify_secure_heap_physical_range(
14710        &self,
14711        payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
14712    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut;
14713    type ZeroSubRangeResponseFut: std::future::Future<Output = Result<SecureMemZeroSubRangeResult, fidl::Error>>
14714        + Send;
14715    fn r#zero_sub_range(
14716        &self,
14717        payload: &SecureMemZeroSubRangeRequest,
14718    ) -> Self::ZeroSubRangeResponseFut;
14719}
14720#[derive(Debug)]
14721#[cfg(target_os = "fuchsia")]
14722pub struct SecureMemSynchronousProxy {
14723    client: fidl::client::sync::Client,
14724}
14725
14726#[cfg(target_os = "fuchsia")]
14727impl fidl::endpoints::SynchronousProxy for SecureMemSynchronousProxy {
14728    type Proxy = SecureMemProxy;
14729    type Protocol = SecureMemMarker;
14730
14731    fn from_channel(inner: fidl::Channel) -> Self {
14732        Self::new(inner)
14733    }
14734
14735    fn into_channel(self) -> fidl::Channel {
14736        self.client.into_channel()
14737    }
14738
14739    fn as_channel(&self) -> &fidl::Channel {
14740        self.client.as_channel()
14741    }
14742}
14743
14744#[cfg(target_os = "fuchsia")]
14745impl SecureMemSynchronousProxy {
14746    pub fn new(channel: fidl::Channel) -> Self {
14747        let protocol_name = <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
14748        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
14749    }
14750
14751    pub fn into_channel(self) -> fidl::Channel {
14752        self.client.into_channel()
14753    }
14754
14755    /// Waits until an event arrives and returns it. It is safe for other
14756    /// threads to make concurrent requests while waiting for an event.
14757    pub fn wait_for_event(
14758        &self,
14759        deadline: zx::MonotonicInstant,
14760    ) -> Result<SecureMemEvent, fidl::Error> {
14761        SecureMemEvent::decode(self.client.wait_for_event(deadline)?)
14762    }
14763
14764    /// Gets the physical address and length of any secure heap whose physical
14765    /// range is configured via the TEE.
14766    ///
14767    /// Presently, these will be fixed physical addresses and lengths, with the
14768    /// location plumbed via the TEE.
14769    ///
14770    /// This is preferred over [`fuchsia.hardware.sysmem.Sysmem/RegisterHeap`]
14771    /// when there isn't any special heap-specific per-VMO setup or teardown
14772    /// required.
14773    ///
14774    /// The physical range must be secured/protected by the TEE before the
14775    /// securemem driver responds to this request with success.
14776    ///
14777    /// Sysmem should only call this once.  Returning zero heaps is not a
14778    /// failure.
14779    ///
14780    /// Errors:
14781    ///  * PROTOCOL_DEVIATION - called more than once.
14782    ///  * UNSPECIFIED - generic internal error (such as in communication
14783    ///    with TEE which doesn't generate zx_status_t errors).
14784    ///  * other errors are allowed; any other errors should be treated the same
14785    ///    as UNSPECIFIED.
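    ///
    /// A minimal call sketch on the synchronous proxy; `zx::MonotonicInstant::INFINITE`
    /// as the deadline is an assumption about the `zx` crate in use:
    ///
    /// ```ignore
    /// fn query_physical_heaps(
    ///     channel: fidl::Channel,
    /// ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
    ///     let secure_mem = SecureMemSynchronousProxy::new(channel);
    ///     secure_mem.get_physical_secure_heaps(zx::MonotonicInstant::INFINITE)
    /// }
    /// ```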
14786    pub fn r#get_physical_secure_heaps(
14787        &self,
14788        ___deadline: zx::MonotonicInstant,
14789    ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
14790        let _response = self.client.send_query::<
14791            fidl::encoding::EmptyPayload,
14792            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
14793        >(
14794            (),
14795            0x38716300592073e3,
14796            fidl::encoding::DynamicFlags::FLEXIBLE,
14797            ___deadline,
14798        )?
14799        .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
14800        Ok(_response.map(|x| x))
14801    }
14802
14803    /// Gets information about any secure heaps whose physical pages are not
14804    /// configured by the TEE, but by sysmem.
14805    ///
14806    /// Sysmem should only call this once. Returning zero heaps is not a
14807    /// failure.
14808    ///
14809    /// Errors:
14810    ///  * PROTOCOL_DEVIATION - called more than once.
14811    ///  * UNSPECIFIED - generic internal error (such as in communication
14812    ///    with TEE which doesn't generate zx_status_t errors).
14813    ///  * other errors are allowed; any other errors should be treated the same
14814    ///    as UNSPECIFIED.
14815    pub fn r#get_dynamic_secure_heaps(
14816        &self,
14817        ___deadline: zx::MonotonicInstant,
14818    ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
14819        let _response = self.client.send_query::<
14820            fidl::encoding::EmptyPayload,
14821            fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
14822        >(
14823            (),
14824            0x1190847f99952834,
14825            fidl::encoding::DynamicFlags::FLEXIBLE,
14826            ___deadline,
14827        )?
14828        .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
14829        Ok(_response.map(|x| x))
14830    }
14831
14832    /// This request from sysmem to the securemem driver gets the properties of
14833    /// a protected/secure heap.
14834    ///
14835    /// This only handles heaps with a single contiguous physical extent.
14836    ///
14837    /// The heap's entire physical range is indicated in case this request needs
14838    /// some physical space to auto-detect how many ranges are REE-usable.  Any
14839    /// temporary HW protection ranges will be deleted before this request
14840    /// completes.
14841    ///
14842    /// Errors:
14843    ///  * UNSPECIFIED - generic internal error (such as in communication
14844    ///    with TEE which doesn't generate zx_status_t errors).
14845    ///  * other errors are allowed; any other errors should be treated the same
14846    ///    as UNSPECIFIED.
14847    pub fn r#get_physical_secure_heap_properties(
14848        &self,
14849        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
14850        ___deadline: zx::MonotonicInstant,
14851    ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
14852        let _response = self.client.send_query::<
14853            SecureMemGetPhysicalSecureHeapPropertiesRequest,
14854            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
14855        >(
14856            payload,
14857            0xc6f06889009c7bc,
14858            fidl::encoding::DynamicFlags::FLEXIBLE,
14859            ___deadline,
14860        )?
14861        .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
14862        Ok(_response.map(|x| x))
14863    }
14864
14865    /// This request from sysmem to the securemem driver conveys a physical
14866    /// range to add, for a heap whose physical range(s) are set up via
14867    /// sysmem.
14868    ///
14869    /// Only sysmem can call this because only sysmem is handed the client end
14870    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
14871    /// securemem driver is the server end of this protocol.
14872    ///
14873    /// The securemem driver must configure all the covered offsets as protected
14874    /// before responding to this message with success.
14875    ///
14876    /// On failure, the securemem driver must ensure the protected range was not
14877    /// created.
14878    ///
14879    /// Sysmem must only call this up to once if dynamic_protection_ranges
14880    /// is false.
14881    ///
14882    /// If dynamic_protection_ranges is true, sysmem can call this multiple
14883    /// times as long as the current number of ranges never exceeds
14884    /// max_protected_range_count.
14885    ///
14886    /// The caller must not attempt to add a range that matches an
14887    /// already-existing range.  Added ranges can overlap each other as long as
14888    /// no two ranges match exactly.
14889    ///
14890    /// Errors:
14891    ///   * PROTOCOL_DEVIATION - called more than once when
14892    ///     !dynamic_protection_ranges.  Adding a heap that would cause overall
14893    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
14894    ///     range that doesn't conform to protected_range_granularity. See log.
14895    ///   * UNSPECIFIED - generic internal error (such as in communication
14896    ///     with TEE which doesn't generate zx_status_t errors).
14897    ///   * other errors are possible, such as from communication failures or
14898    ///     server propagation of failures.
14899    pub fn r#add_secure_heap_physical_range(
14900        &self,
14901        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
14902        ___deadline: zx::MonotonicInstant,
14903    ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
14904        let _response = self.client.send_query::<
14905            SecureMemAddSecureHeapPhysicalRangeRequest,
14906            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
14907        >(
14908            payload,
14909            0x35f695b9b6c7217a,
14910            fidl::encoding::DynamicFlags::FLEXIBLE,
14911            ___deadline,
14912        )?
14913        .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
14914        Ok(_response.map(|x| x))
14915    }
14916
14917    /// This request from sysmem to the securemem driver conveys a physical
14918    /// range to delete, for a heap whose physical range(s) are set up via
14919    /// sysmem.
14920    ///
14921    /// Only sysmem can call this because only sysmem is handed the client end
14922    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
14923    /// securemem driver is the server end of this protocol.
14924    ///
14925    /// The securemem driver must configure all the covered offsets as not
14926    /// protected before responding to this message with success.
14927    ///
14928    /// On failure, the securemem driver must ensure the protected range was not
14929    /// deleted.
14930    ///
14931    /// Sysmem must not call this if dynamic_protection_ranges is false.
14932    ///
14933    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
14934    /// on various ranges that exist at the time of the call.
14935    ///
14936    /// If any portion of the range being deleted is not also covered by another
14937    /// protected range, then any ongoing DMA to any part of the entire range
14938    /// may be interrupted / may fail, potentially in a way that's disruptive to
14939    /// the entire system (bus lockup or similar, depending on device details).
14940    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
14941    /// any portion of the range being deleted, unless the caller has other
14942    /// active ranges covering every block of the range being deleted.  Ongoing
14943    /// DMA to/from blocks outside the range being deleted is never impacted by
14944    /// the deletion.
14945    ///
14946    /// Errors:
14947    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
14948    ///     Unexpected heap, or range that doesn't conform to
14949    ///     protected_range_granularity.
14950    ///   * UNSPECIFIED - generic internal error (such as in communication
14951    ///     with TEE which doesn't generate zx_status_t errors).
14952    ///   * NOT_FOUND - the specified range is not found.
14953    ///   * other errors are possible, such as from communication failures or
14954    ///     server propagation of failures.
14955    pub fn r#delete_secure_heap_physical_range(
14956        &self,
14957        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
14958        ___deadline: zx::MonotonicInstant,
14959    ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
14960        let _response = self.client.send_query::<
14961            SecureMemDeleteSecureHeapPhysicalRangeRequest,
14962            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
14963        >(
14964            payload,
14965            0xeaa58c650264c9e,
14966            fidl::encoding::DynamicFlags::FLEXIBLE,
14967            ___deadline,
14968        )?
14969        .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
14970        Ok(_response.map(|x| x))
14971    }
14972
14973    /// This request from sysmem to the securemem driver conveys a physical
14974    /// range to modify and its new base and length, for a heap whose physical
14975    /// range(s) are set up via sysmem.
14976    ///
14977    /// Only sysmem can call this because only sysmem is handed the client end
14978    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
14979    /// securemem driver is the server end of this protocol.
14980    ///
14981    /// The securemem driver must configure the range to cover only the new
14982    /// offsets before responding to this message with success.
14983    ///
14984    /// On failure, the securemem driver must ensure the range was not changed.
14985    ///
14986    /// Sysmem must not call this if dynamic_protection_ranges is false.
14987    /// Sysmem must not call this if !is_mod_protected_range_available.
14988    ///
14989    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
14990    /// on various ranges that exist at the time of the call.
14991    ///
14992    /// The range must only be modified at one end or the other, but not both.
14993    /// If the range is getting shorter, and the un-covered blocks are not
14994    /// covered by other active ranges, any ongoing DMA to the entire range
14995    /// that's getting shorter may fail in a way that disrupts the entire system
14996    /// (bus lockup or similar), so the caller must ensure that no DMA is
14997    /// ongoing to any portion of a range that is getting shorter, unless the
14998    /// blocks being un-covered by the modification to this range are all
14999    /// covered by other active ranges, in which case no disruption to ongoing
15000    /// DMA will occur.
15001    ///
15002    /// If a range is modified to become <= zero length, the range is deleted.
15003    ///
15004    /// Errors:
15005    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15006    ///     Unexpected heap, or old_range or new_range that doesn't conform to
15007    ///     protected_range_granularity, or old_range and new_range differ in
15008    ///     both begin and end (disallowed).
15009    ///   * UNSPECIFIED - generic internal error (such as in communication
15010    ///     with TEE which doesn't generate zx_status_t errors).
15011    ///   * NOT_FOUND - the specified range is not found.
15012    ///   * other errors are possible, such as from communication failures or
15013    ///     server propagation of failures.
15014    pub fn r#modify_secure_heap_physical_range(
15015        &self,
15016        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
15017        ___deadline: zx::MonotonicInstant,
15018    ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
15019        let _response = self.client.send_query::<
15020            SecureMemModifySecureHeapPhysicalRangeRequest,
15021            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15022        >(
15023            payload,
15024            0x60b7448aa1187734,
15025            fidl::encoding::DynamicFlags::FLEXIBLE,
15026            ___deadline,
15027        )?
15028        .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
15029        Ok(_response.map(|x| x))
15030    }
15031
15032    /// Zero a sub-range of a currently-existing physical range added via
15033    /// AddSecureHeapPhysicalRange().  The sub-range must be fully covered by
15034    /// exactly one physical range, and must not overlap with any other
15035    /// physical range.
15036    ///
15037    /// is_covering_range_explicit - When true, the covering range must be one
15038    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
15039    ///     possibly modified since.  When false, the covering range must not
15040    ///     be one of the ranges explicitly created via
15041    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
15042    ///     a covering range not created via AddSecureHeapPhysicalRange().  The
15043    ///     covering range is typically the entire physical range (or a range
15044    ///     which covers even more) of a heap configured by the TEE and whose
15045    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
15046    ///
15047    /// Ongoing DMA is not disrupted by this request.
15048    ///
15049    /// Errors:
15050    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15051    ///     Unexpected heap.
15052    ///   * UNSPECIFIED - generic internal error (such as in communication
15053    ///     with TEE which doesn't generate zx_status_t errors).
15054    ///   * other errors are possible, such as from communication failures or
15055    ///     server propagation of failures.
15056    pub fn r#zero_sub_range(
15057        &self,
15058        mut payload: &SecureMemZeroSubRangeRequest,
15059        ___deadline: zx::MonotonicInstant,
15060    ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
15061        let _response = self.client.send_query::<
15062            SecureMemZeroSubRangeRequest,
15063            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15064        >(
15065            payload,
15066            0x5b25b7901a385ce5,
15067            fidl::encoding::DynamicFlags::FLEXIBLE,
15068            ___deadline,
15069        )?
15070        .into_result::<SecureMemMarker>("zero_sub_range")?;
15071        Ok(_response.map(|x| x))
15072    }
15073}
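
// --- Illustrative usage sketch (editorial addition, not fidlgen output). ---
// A minimal example of driving the blocking client above; it assumes the
// standard fidlgen name `SecureMemSynchronousProxy` for that client and an
// infinite deadline, both of which are illustrative choices rather than
// anything mandated by this file.
#[allow(dead_code)]
fn example_query_dynamic_heaps_sync(
    proxy: &SecureMemSynchronousProxy,
) -> Result<(), fidl::Error> {
    // Blocks until the securemem driver responds (or the deadline expires).
    match proxy.r#get_dynamic_secure_heaps(zx::MonotonicInstant::INFINITE)? {
        Ok(response) => {
            // `response` is the generated response table; zero heaps is a
            // valid, non-error outcome per the method's doc comment.
            println!("dynamic secure heaps: {response:?}");
        }
        Err(protocol_error) => {
            // Per the doc comment, errors other than PROTOCOL_DEVIATION
            // should be treated the same as UNSPECIFIED.
            eprintln!("GetDynamicSecureHeaps failed: {protocol_error:?}");
        }
    }
    Ok(())
}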
15074
15075#[derive(Debug, Clone)]
15076pub struct SecureMemProxy {
15077    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
15078}
15079
15080impl fidl::endpoints::Proxy for SecureMemProxy {
15081    type Protocol = SecureMemMarker;
15082
15083    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
15084        Self::new(inner)
15085    }
15086
15087    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
15088        self.client.into_channel().map_err(|client| Self { client })
15089    }
15090
15091    fn as_channel(&self) -> &::fidl::AsyncChannel {
15092        self.client.as_channel()
15093    }
15094}
15095
15096impl SecureMemProxy {
15097    /// Create a new Proxy for fuchsia.sysmem2/SecureMem.
15098    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
15099        let protocol_name = <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
15100        Self { client: fidl::client::Client::new(channel, protocol_name) }
15101    }
15102
15103    /// Get a Stream of events from the remote end of the protocol.
15104    ///
15105    /// # Panics
15106    ///
15107    /// Panics if the event stream was already taken.
15108    pub fn take_event_stream(&self) -> SecureMemEventStream {
15109        SecureMemEventStream { event_receiver: self.client.take_event_receiver() }
15110    }
15111
15112    /// Gets the physical address and length of any secure heap whose physical
15113    /// range is configured via the TEE.
15114    ///
15115    /// Presently, these will be fixed physical addresses and lengths, with the
15116    /// location plumbed via the TEE.
15117    ///
15118    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
15119    /// when there isn't any special heap-specific per-VMO setup or teardown
15120    /// required.
15121    ///
15122    /// The physical range must be secured/protected by the TEE before the
15123    /// securemem driver responds to this request with success.
15124    ///
15125    /// Sysmem should only call this once.  Returning zero heaps is not a
15126    /// failure.
15127    ///
15128    /// Errors:
15129    ///  * PROTOCOL_DEVIATION - called more than once.
15130    ///  * UNSPECIFIED - generic internal error (such as in communication
15131    ///    with TEE which doesn't generate zx_status_t errors).
15132    ///  * other errors are allowed; any other errors should be treated the same
15133    ///    as UNSPECIFIED.
15134    pub fn r#get_physical_secure_heaps(
15135        &self,
15136    ) -> fidl::client::QueryResponseFut<
15137        SecureMemGetPhysicalSecureHeapsResult,
15138        fidl::encoding::DefaultFuchsiaResourceDialect,
15139    > {
15140        SecureMemProxyInterface::r#get_physical_secure_heaps(self)
15141    }
15142
15143    /// Gets information about any secure heaps whose physical pages are not
15144    /// configured by the TEE, but by sysmem.
15145    ///
15146    /// Sysmem should only call this once. Returning zero heaps is not a
15147    /// failure.
15148    ///
15149    /// Errors:
15150    ///  * PROTOCOL_DEVIATION - called more than once.
15151    ///  * UNSPECIFIED - generic internal error (such as in communication
15152    ///    with TEE which doesn't generate zx_status_t errors).
15153    ///  * other errors are allowed; any other errors should be treated the same
15154    ///    as UNSPECIFIED.
15155    pub fn r#get_dynamic_secure_heaps(
15156        &self,
15157    ) -> fidl::client::QueryResponseFut<
15158        SecureMemGetDynamicSecureHeapsResult,
15159        fidl::encoding::DefaultFuchsiaResourceDialect,
15160    > {
15161        SecureMemProxyInterface::r#get_dynamic_secure_heaps(self)
15162    }
15163
15164    /// This request from sysmem to the securemem driver gets the properties of
15165    /// a protected/secure heap.
15166    ///
15167    /// This only handles heaps with a single contiguous physical extent.
15168    ///
15169    /// The heap's entire physical range is indicated in case this request needs
15170    /// some physical space to auto-detect how many ranges are REE-usable.  Any
15171    /// temporary HW protection ranges will be deleted before this request
15172    /// completes.
15173    ///
15174    /// Errors:
15175    ///  * UNSPECIFIED - generic internal error (such as in communication
15176    ///    with TEE which doesn't generate zx_status_t errors).
15177    ///  * other errors are allowed; any other errors should be treated the same
15178    ///    as UNSPECIFIED.
15179    pub fn r#get_physical_secure_heap_properties(
15180        &self,
15181        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
15182    ) -> fidl::client::QueryResponseFut<
15183        SecureMemGetPhysicalSecureHeapPropertiesResult,
15184        fidl::encoding::DefaultFuchsiaResourceDialect,
15185    > {
15186        SecureMemProxyInterface::r#get_physical_secure_heap_properties(self, payload)
15187    }
15188
15189    /// This request from sysmem to the securemem driver conveys a physical
15190    /// range to add, for a heap whose physical range(s) are set up via
15191    /// sysmem.
15192    ///
15193    /// Only sysmem can call this because only sysmem is handed the client end
15194    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
15195    /// securemem driver is the server end of this protocol.
15196    ///
15197    /// The securemem driver must configure all the covered offsets as protected
15198    /// before responding to this message with success.
15199    ///
15200    /// On failure, the securemem driver must ensure the protected range was not
15201    /// created.
15202    ///
15203    /// Sysmem must call this at most once if dynamic_protection_ranges is
15204    /// false.
15205    ///
15206    /// If dynamic_protection_ranges is true, sysmem can call this multiple
15207    /// times as long as the current number of ranges never exceeds
15208    /// max_protected_range_count.
15209    ///
15210    /// The caller must not attempt to add a range that matches an
15211    /// already-existing range.  Added ranges can overlap each other as long as
15212    /// no two ranges match exactly.
15213    ///
15214    /// Errors:
15215    ///   * PROTOCOL_DEVIATION - called more than once when
15216    ///     !dynamic_protection_ranges.  Adding a heap that would cause overall
15217    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
15218    ///     range that doesn't conform to protected_range_granularity. See log.
15219    ///   * UNSPECIFIED - generic internal error (such as in communication
15220    ///     with TEE which doesn't generate zx_status_t errors).
15221    ///   * other errors are possible, such as from communication failures or
15222    ///     server propagation of failures.
15223    pub fn r#add_secure_heap_physical_range(
15224        &self,
15225        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
15226    ) -> fidl::client::QueryResponseFut<
15227        SecureMemAddSecureHeapPhysicalRangeResult,
15228        fidl::encoding::DefaultFuchsiaResourceDialect,
15229    > {
15230        SecureMemProxyInterface::r#add_secure_heap_physical_range(self, payload)
15231    }
15232
15233    /// This request from sysmem to the securemem driver conveys a physical
15234    /// range to delete, for a heap whose physical range(s) are set up via
15235    /// sysmem.
15236    ///
15237    /// Only sysmem can call this because only sysmem is handed the client end
15238    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
15239    /// securemem driver is the server end of this protocol.
15240    ///
15241    /// The securemem driver must configure all the covered offsets as not
15242    /// protected before responding to this message with success.
15243    ///
15244    /// On failure, the securemem driver must ensure the protected range was not
15245    /// deleted.
15246    ///
15247    /// Sysmem must not call this if dynamic_protection_ranges is false.
15248    ///
15249    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
15250    /// on various ranges that exist at the time of the call.
15251    ///
15252    /// If any portion of the range being deleted is not also covered by another
15253    /// protected range, then any ongoing DMA to any part of the entire range
15254    /// may be interrupted / may fail, potentially in a way that's disruptive to
15255    /// the entire system (bus lockup or similar, depending on device details).
15256    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
15257    /// any portion of the range being deleted, unless the caller has other
15258    /// active ranges covering every block of the range being deleted.  Ongoing
15259    /// DMA to/from blocks outside the range being deleted is never impacted by
15260    /// the deletion.
15261    ///
15262    /// Errors:
15263    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15264    ///     Unexpected heap, or range that doesn't conform to
15265    ///     protected_range_granularity.
15266    ///   * UNSPECIFIED - generic internal error (such as in communication
15267    ///     with TEE which doesn't generate zx_status_t errors).
15268    ///   * NOT_FOUND - the specified range is not found.
15269    ///   * other errors are possible, such as from communication failures or
15270    ///     server propagation of failures.
15271    pub fn r#delete_secure_heap_physical_range(
15272        &self,
15273        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
15274    ) -> fidl::client::QueryResponseFut<
15275        SecureMemDeleteSecureHeapPhysicalRangeResult,
15276        fidl::encoding::DefaultFuchsiaResourceDialect,
15277    > {
15278        SecureMemProxyInterface::r#delete_secure_heap_physical_range(self, payload)
15279    }
15280
15281    /// This request from sysmem to the securemem driver conveys a physical
15282    /// range to modify and its new base and length, for a heap whose physical
15283    /// range(s) are set up via sysmem.
15284    ///
15285    /// Only sysmem can call this because only sysmem is handed the client end
15286    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
15287    /// securemem driver is the server end of this protocol.
15288    ///
15289    /// The securemem driver must configure the range to cover only the new
15290    /// offsets before responding to this message with success.
15291    ///
15292    /// On failure, the securemem driver must ensure the range was not changed.
15293    ///
15294    /// Sysmem must not call this if dynamic_protection_ranges is false.
15295    /// Sysmem must not call this if !is_mod_protected_range_available.
15296    ///
15297    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
15298    /// on various ranges that exist at the time of the call.
15299    ///
15300    /// The range must only be modified at one end or the other, but not both.
15301    /// If the range is getting shorter, and the un-covered blocks are not
15302    /// covered by other active ranges, any ongoing DMA to the entire range
15303    /// that's getting shorter may fail in a way that disrupts the entire system
15304    /// (bus lockup or similar), so the caller must ensure that no DMA is
15305    /// ongoing to any portion of a range that is getting shorter, unless the
15306    /// blocks being un-covered by the modification to this range are all
15307    /// covered by other active ranges, in which case no disruption to ongoing
15308    /// DMA will occur.
15309    ///
15310    /// If a range is modified to become <= zero length, the range is deleted.
15311    ///
15312    /// Errors:
15313    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15314    ///     Unexpected heap, or old_range or new_range that doesn't conform to
15315    ///     protected_range_granularity, or old_range and new_range differ in
15316    ///     both begin and end (disallowed).
15317    ///   * UNSPECIFIED - generic internal error (such as in communication
15318    ///     with TEE which doesn't generate zx_status_t errors).
15319    ///   * NOT_FOUND - the specified range is not found.
15320    ///   * other errors are possible, such as from communication failures or
15321    ///     server propagation of failures.
15322    pub fn r#modify_secure_heap_physical_range(
15323        &self,
15324        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
15325    ) -> fidl::client::QueryResponseFut<
15326        SecureMemModifySecureHeapPhysicalRangeResult,
15327        fidl::encoding::DefaultFuchsiaResourceDialect,
15328    > {
15329        SecureMemProxyInterface::r#modify_secure_heap_physical_range(self, payload)
15330    }
15331
15332    /// Zero a sub-range of a currently-existing physical range added via
15333    /// AddSecureHeapPhysicalRange().  The sub-range must be fully covered by
15334    /// exactly one physical range, and must not overlap with any other
15335    /// physical range.
15336    ///
15337    /// is_covering_range_explicit - When true, the covering range must be one
15338    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
15339    ///     possibly modified since.  When false, the covering range must not
15340    ///     be one of the ranges explicitly created via
15341    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
15342    ///     a covering range not created via AddSecureHeapPhysicalRange().  The
15343    ///     covering range is typically the entire physical range (or a range
15344    ///     which covers even more) of a heap configured by the TEE and whose
15345    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
15346    ///
15347    /// Ongoing DMA is not disrupted by this request.
15348    ///
15349    /// Errors:
15350    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15351    ///     Unexpected heap.
15352    ///   * UNSPECIFIED - generic internal error (such as in communication
15353    ///     with TEE which doesn't generate zx_status_t errors).
15354    ///   * other errors are possible, such as from communication failures or
15355    ///     server propagation of failures.
15356    pub fn r#zero_sub_range(
15357        &self,
15358        mut payload: &SecureMemZeroSubRangeRequest,
15359    ) -> fidl::client::QueryResponseFut<
15360        SecureMemZeroSubRangeResult,
15361        fidl::encoding::DefaultFuchsiaResourceDialect,
15362    > {
15363        SecureMemProxyInterface::r#zero_sub_range(self, payload)
15364    }
15365}
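
// --- Illustrative usage sketch (editorial addition, not fidlgen output). ---
// A hedged example of the asynchronous client defined above: the caller is
// assumed to already hold a connected `SecureMemProxy` and a fully populated
// `SecureMemZeroSubRangeRequest` (its fields come from the re-exported common
// crate and are not spelled out here).
#[allow(dead_code)]
async fn example_zero_sub_range(
    proxy: &SecureMemProxy,
    payload: &SecureMemZeroSubRangeRequest,
) -> Result<(), fidl::Error> {
    // The generated method returns a QueryResponseFut; awaiting it yields
    // Result<SecureMemZeroSubRangeResult, fidl::Error>.
    match proxy.r#zero_sub_range(payload).await? {
        Ok(()) => {
            // Success: the covered sub-range has been zeroed. Ongoing DMA is
            // not disrupted by this request (see the doc comment above).
        }
        Err(protocol_error) => {
            eprintln!("ZeroSubRange failed: {protocol_error:?}");
        }
    }
    Ok(())
}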
15366
15367impl SecureMemProxyInterface for SecureMemProxy {
15368    type GetPhysicalSecureHeapsResponseFut = fidl::client::QueryResponseFut<
15369        SecureMemGetPhysicalSecureHeapsResult,
15370        fidl::encoding::DefaultFuchsiaResourceDialect,
15371    >;
15372    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut {
15373        fn _decode(
15374            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15375        ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
15376            let _response = fidl::client::decode_transaction_body::<
15377                fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
15378                fidl::encoding::DefaultFuchsiaResourceDialect,
15379                0x38716300592073e3,
15380            >(_buf?)?
15381            .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
15382            Ok(_response.map(|x| x))
15383        }
15384        self.client.send_query_and_decode::<
15385            fidl::encoding::EmptyPayload,
15386            SecureMemGetPhysicalSecureHeapsResult,
15387        >(
15388            (),
15389            0x38716300592073e3,
15390            fidl::encoding::DynamicFlags::FLEXIBLE,
15391            _decode,
15392        )
15393    }
15394
15395    type GetDynamicSecureHeapsResponseFut = fidl::client::QueryResponseFut<
15396        SecureMemGetDynamicSecureHeapsResult,
15397        fidl::encoding::DefaultFuchsiaResourceDialect,
15398    >;
15399    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut {
15400        fn _decode(
15401            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15402        ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
15403            let _response = fidl::client::decode_transaction_body::<
15404                fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
15405                fidl::encoding::DefaultFuchsiaResourceDialect,
15406                0x1190847f99952834,
15407            >(_buf?)?
15408            .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
15409            Ok(_response.map(|x| x))
15410        }
15411        self.client.send_query_and_decode::<
15412            fidl::encoding::EmptyPayload,
15413            SecureMemGetDynamicSecureHeapsResult,
15414        >(
15415            (),
15416            0x1190847f99952834,
15417            fidl::encoding::DynamicFlags::FLEXIBLE,
15418            _decode,
15419        )
15420    }
15421
15422    type GetPhysicalSecureHeapPropertiesResponseFut = fidl::client::QueryResponseFut<
15423        SecureMemGetPhysicalSecureHeapPropertiesResult,
15424        fidl::encoding::DefaultFuchsiaResourceDialect,
15425    >;
15426    fn r#get_physical_secure_heap_properties(
15427        &self,
15428        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
15429    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut {
15430        fn _decode(
15431            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15432        ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
15433            let _response = fidl::client::decode_transaction_body::<
15434                fidl::encoding::FlexibleResultType<
15435                    SecureMemGetPhysicalSecureHeapPropertiesResponse,
15436                    Error,
15437                >,
15438                fidl::encoding::DefaultFuchsiaResourceDialect,
15439                0xc6f06889009c7bc,
15440            >(_buf?)?
15441            .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
15442            Ok(_response.map(|x| x))
15443        }
15444        self.client.send_query_and_decode::<
15445            SecureMemGetPhysicalSecureHeapPropertiesRequest,
15446            SecureMemGetPhysicalSecureHeapPropertiesResult,
15447        >(
15448            payload,
15449            0xc6f06889009c7bc,
15450            fidl::encoding::DynamicFlags::FLEXIBLE,
15451            _decode,
15452        )
15453    }
15454
15455    type AddSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
15456        SecureMemAddSecureHeapPhysicalRangeResult,
15457        fidl::encoding::DefaultFuchsiaResourceDialect,
15458    >;
15459    fn r#add_secure_heap_physical_range(
15460        &self,
15461        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
15462    ) -> Self::AddSecureHeapPhysicalRangeResponseFut {
15463        fn _decode(
15464            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15465        ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
15466            let _response = fidl::client::decode_transaction_body::<
15467                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15468                fidl::encoding::DefaultFuchsiaResourceDialect,
15469                0x35f695b9b6c7217a,
15470            >(_buf?)?
15471            .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
15472            Ok(_response.map(|x| x))
15473        }
15474        self.client.send_query_and_decode::<
15475            SecureMemAddSecureHeapPhysicalRangeRequest,
15476            SecureMemAddSecureHeapPhysicalRangeResult,
15477        >(
15478            payload,
15479            0x35f695b9b6c7217a,
15480            fidl::encoding::DynamicFlags::FLEXIBLE,
15481            _decode,
15482        )
15483    }
15484
15485    type DeleteSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
15486        SecureMemDeleteSecureHeapPhysicalRangeResult,
15487        fidl::encoding::DefaultFuchsiaResourceDialect,
15488    >;
15489    fn r#delete_secure_heap_physical_range(
15490        &self,
15491        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
15492    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut {
15493        fn _decode(
15494            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15495        ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
15496            let _response = fidl::client::decode_transaction_body::<
15497                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15498                fidl::encoding::DefaultFuchsiaResourceDialect,
15499                0xeaa58c650264c9e,
15500            >(_buf?)?
15501            .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
15502            Ok(_response.map(|x| x))
15503        }
15504        self.client.send_query_and_decode::<
15505            SecureMemDeleteSecureHeapPhysicalRangeRequest,
15506            SecureMemDeleteSecureHeapPhysicalRangeResult,
15507        >(
15508            payload,
15509            0xeaa58c650264c9e,
15510            fidl::encoding::DynamicFlags::FLEXIBLE,
15511            _decode,
15512        )
15513    }
15514
15515    type ModifySecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
15516        SecureMemModifySecureHeapPhysicalRangeResult,
15517        fidl::encoding::DefaultFuchsiaResourceDialect,
15518    >;
15519    fn r#modify_secure_heap_physical_range(
15520        &self,
15521        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
15522    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut {
15523        fn _decode(
15524            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15525        ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
15526            let _response = fidl::client::decode_transaction_body::<
15527                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15528                fidl::encoding::DefaultFuchsiaResourceDialect,
15529                0x60b7448aa1187734,
15530            >(_buf?)?
15531            .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
15532            Ok(_response.map(|x| x))
15533        }
15534        self.client.send_query_and_decode::<
15535            SecureMemModifySecureHeapPhysicalRangeRequest,
15536            SecureMemModifySecureHeapPhysicalRangeResult,
15537        >(
15538            payload,
15539            0x60b7448aa1187734,
15540            fidl::encoding::DynamicFlags::FLEXIBLE,
15541            _decode,
15542        )
15543    }
15544
15545    type ZeroSubRangeResponseFut = fidl::client::QueryResponseFut<
15546        SecureMemZeroSubRangeResult,
15547        fidl::encoding::DefaultFuchsiaResourceDialect,
15548    >;
15549    fn r#zero_sub_range(
15550        &self,
15551        mut payload: &SecureMemZeroSubRangeRequest,
15552    ) -> Self::ZeroSubRangeResponseFut {
15553        fn _decode(
15554            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
15555        ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
15556            let _response = fidl::client::decode_transaction_body::<
15557                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15558                fidl::encoding::DefaultFuchsiaResourceDialect,
15559                0x5b25b7901a385ce5,
15560            >(_buf?)?
15561            .into_result::<SecureMemMarker>("zero_sub_range")?;
15562            Ok(_response.map(|x| x))
15563        }
15564        self.client
15565            .send_query_and_decode::<SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResult>(
15566                payload,
15567                0x5b25b7901a385ce5,
15568                fidl::encoding::DynamicFlags::FLEXIBLE,
15569                _decode,
15570            )
15571    }
15572}
15573
15574pub struct SecureMemEventStream {
15575    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
15576}
15577
15578impl std::marker::Unpin for SecureMemEventStream {}
15579
15580impl futures::stream::FusedStream for SecureMemEventStream {
15581    fn is_terminated(&self) -> bool {
15582        self.event_receiver.is_terminated()
15583    }
15584}
15585
15586impl futures::Stream for SecureMemEventStream {
15587    type Item = Result<SecureMemEvent, fidl::Error>;
15588
15589    fn poll_next(
15590        mut self: std::pin::Pin<&mut Self>,
15591        cx: &mut std::task::Context<'_>,
15592    ) -> std::task::Poll<Option<Self::Item>> {
15593        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
15594            &mut self.event_receiver,
15595            cx
15596        )?) {
15597            Some(buf) => std::task::Poll::Ready(Some(SecureMemEvent::decode(buf))),
15598            None => std::task::Poll::Ready(None),
15599        }
15600    }
15601}
15602
15603#[derive(Debug)]
15604pub enum SecureMemEvent {
15605    #[non_exhaustive]
15606    _UnknownEvent {
15607        /// Ordinal of the event that was sent.
15608        ordinal: u64,
15609    },
15610}
15611
15612impl SecureMemEvent {
15613    /// Decodes a message buffer as a [`SecureMemEvent`].
15614    fn decode(
15615        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
15616    ) -> Result<SecureMemEvent, fidl::Error> {
15617        let (bytes, _handles) = buf.split_mut();
15618        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
15619        debug_assert_eq!(tx_header.tx_id, 0);
15620        match tx_header.ordinal {
15621            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
15622                Ok(SecureMemEvent::_UnknownEvent { ordinal: tx_header.ordinal })
15623            }
15624            _ => Err(fidl::Error::UnknownOrdinal {
15625                ordinal: tx_header.ordinal,
15626                protocol_name: <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
15627            }),
15628        }
15629    }
15630}
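
// --- Illustrative usage sketch (editorial addition, not fidlgen output). ---
// SecureMem currently defines no events, so the only variant a client can
// observe is `_UnknownEvent` (sent flexibly by a newer server). This sketch
// assumes the caller owns a connected `SecureMemProxy` and simply drains the
// event stream.
#[allow(dead_code)]
async fn example_drain_secure_mem_events(proxy: &SecureMemProxy) -> Result<(), fidl::Error> {
    use futures::TryStreamExt as _;
    let mut events = proxy.take_event_stream();
    while let Some(event) = events.try_next().await? {
        match event {
            SecureMemEvent::_UnknownEvent { ordinal } => {
                // A flexible event from a newer server revision; ignore it.
                eprintln!("ignoring unknown SecureMem event, ordinal {ordinal:#x}");
            }
        }
    }
    Ok(())
}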
15631
15632/// A Stream of incoming requests for fuchsia.sysmem2/SecureMem.
15633pub struct SecureMemRequestStream {
15634    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
15635    is_terminated: bool,
15636}
15637
15638impl std::marker::Unpin for SecureMemRequestStream {}
15639
15640impl futures::stream::FusedStream for SecureMemRequestStream {
15641    fn is_terminated(&self) -> bool {
15642        self.is_terminated
15643    }
15644}
15645
15646impl fidl::endpoints::RequestStream for SecureMemRequestStream {
15647    type Protocol = SecureMemMarker;
15648    type ControlHandle = SecureMemControlHandle;
15649
15650    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
15651        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
15652    }
15653
15654    fn control_handle(&self) -> Self::ControlHandle {
15655        SecureMemControlHandle { inner: self.inner.clone() }
15656    }
15657
15658    fn into_inner(
15659        self,
15660    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
15661    {
15662        (self.inner, self.is_terminated)
15663    }
15664
15665    fn from_inner(
15666        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
15667        is_terminated: bool,
15668    ) -> Self {
15669        Self { inner, is_terminated }
15670    }
15671}
15672
15673impl futures::Stream for SecureMemRequestStream {
15674    type Item = Result<SecureMemRequest, fidl::Error>;
15675
15676    fn poll_next(
15677        mut self: std::pin::Pin<&mut Self>,
15678        cx: &mut std::task::Context<'_>,
15679    ) -> std::task::Poll<Option<Self::Item>> {
15680        let this = &mut *self;
15681        if this.inner.check_shutdown(cx) {
15682            this.is_terminated = true;
15683            return std::task::Poll::Ready(None);
15684        }
15685        if this.is_terminated {
15686            panic!("polled SecureMemRequestStream after completion");
15687        }
15688        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
15689            |bytes, handles| {
15690                match this.inner.channel().read_etc(cx, bytes, handles) {
15691                    std::task::Poll::Ready(Ok(())) => {}
15692                    std::task::Poll::Pending => return std::task::Poll::Pending,
15693                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
15694                        this.is_terminated = true;
15695                        return std::task::Poll::Ready(None);
15696                    }
15697                    std::task::Poll::Ready(Err(e)) => {
15698                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
15699                            e.into(),
15700                        ))))
15701                    }
15702                }
15703
15704                // A message has been received from the channel
15705                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
15706
15707                std::task::Poll::Ready(Some(match header.ordinal {
15708                    0x38716300592073e3 => {
15709                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
15710                        let mut req = fidl::new_empty!(
15711                            fidl::encoding::EmptyPayload,
15712                            fidl::encoding::DefaultFuchsiaResourceDialect
15713                        );
15714                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
15715                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
15716                        Ok(SecureMemRequest::GetPhysicalSecureHeaps {
15717                            responder: SecureMemGetPhysicalSecureHeapsResponder {
15718                                control_handle: std::mem::ManuallyDrop::new(control_handle),
15719                                tx_id: header.tx_id,
15720                            },
15721                        })
15722                    }
15723                    0x1190847f99952834 => {
15724                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
15725                        let mut req = fidl::new_empty!(
15726                            fidl::encoding::EmptyPayload,
15727                            fidl::encoding::DefaultFuchsiaResourceDialect
15728                        );
15729                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
15730                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
15731                        Ok(SecureMemRequest::GetDynamicSecureHeaps {
15732                            responder: SecureMemGetDynamicSecureHeapsResponder {
15733                                control_handle: std::mem::ManuallyDrop::new(control_handle),
15734                                tx_id: header.tx_id,
15735                            },
15736                        })
15737                    }
15738                    0xc6f06889009c7bc => {
15739                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
15740                        let mut req = fidl::new_empty!(
15741                            SecureMemGetPhysicalSecureHeapPropertiesRequest,
15742                            fidl::encoding::DefaultFuchsiaResourceDialect
15743                        );
15744                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemGetPhysicalSecureHeapPropertiesRequest>(&header, _body_bytes, handles, &mut req)?;
15745                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
15746                        Ok(SecureMemRequest::GetPhysicalSecureHeapProperties {
15747                            payload: req,
15748                            responder: SecureMemGetPhysicalSecureHeapPropertiesResponder {
15749                                control_handle: std::mem::ManuallyDrop::new(control_handle),
15750                                tx_id: header.tx_id,
15751                            },
15752                        })
15753                    }
15754                    0x35f695b9b6c7217a => {
15755                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
15756                        let mut req = fidl::new_empty!(
15757                            SecureMemAddSecureHeapPhysicalRangeRequest,
15758                            fidl::encoding::DefaultFuchsiaResourceDialect
15759                        );
15760                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemAddSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
15761                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
15762                        Ok(SecureMemRequest::AddSecureHeapPhysicalRange {
15763                            payload: req,
15764                            responder: SecureMemAddSecureHeapPhysicalRangeResponder {
15765                                control_handle: std::mem::ManuallyDrop::new(control_handle),
15766                                tx_id: header.tx_id,
15767                            },
15768                        })
15769                    }
15770                    0xeaa58c650264c9e => {
15771                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
15772                        let mut req = fidl::new_empty!(
15773                            SecureMemDeleteSecureHeapPhysicalRangeRequest,
15774                            fidl::encoding::DefaultFuchsiaResourceDialect
15775                        );
15776                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemDeleteSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
15777                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
15778                        Ok(SecureMemRequest::DeleteSecureHeapPhysicalRange {
15779                            payload: req,
15780                            responder: SecureMemDeleteSecureHeapPhysicalRangeResponder {
15781                                control_handle: std::mem::ManuallyDrop::new(control_handle),
15782                                tx_id: header.tx_id,
15783                            },
15784                        })
15785                    }
15786                    0x60b7448aa1187734 => {
15787                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
15788                        let mut req = fidl::new_empty!(
15789                            SecureMemModifySecureHeapPhysicalRangeRequest,
15790                            fidl::encoding::DefaultFuchsiaResourceDialect
15791                        );
15792                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemModifySecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
15793                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
15794                        Ok(SecureMemRequest::ModifySecureHeapPhysicalRange {
15795                            payload: req,
15796                            responder: SecureMemModifySecureHeapPhysicalRangeResponder {
15797                                control_handle: std::mem::ManuallyDrop::new(control_handle),
15798                                tx_id: header.tx_id,
15799                            },
15800                        })
15801                    }
15802                    0x5b25b7901a385ce5 => {
15803                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
15804                        let mut req = fidl::new_empty!(
15805                            SecureMemZeroSubRangeRequest,
15806                            fidl::encoding::DefaultFuchsiaResourceDialect
15807                        );
15808                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemZeroSubRangeRequest>(&header, _body_bytes, handles, &mut req)?;
15809                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
15810                        Ok(SecureMemRequest::ZeroSubRange {
15811                            payload: req,
15812                            responder: SecureMemZeroSubRangeResponder {
15813                                control_handle: std::mem::ManuallyDrop::new(control_handle),
15814                                tx_id: header.tx_id,
15815                            },
15816                        })
15817                    }
15818                    _ if header.tx_id == 0
15819                        && header
15820                            .dynamic_flags()
15821                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
15822                    {
15823                        Ok(SecureMemRequest::_UnknownMethod {
15824                            ordinal: header.ordinal,
15825                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
15826                            method_type: fidl::MethodType::OneWay,
15827                        })
15828                    }
15829                    _ if header
15830                        .dynamic_flags()
15831                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
15832                    {
15833                        this.inner.send_framework_err(
15834                            fidl::encoding::FrameworkErr::UnknownMethod,
15835                            header.tx_id,
15836                            header.ordinal,
15837                            header.dynamic_flags(),
15838                            (bytes, handles),
15839                        )?;
15840                        Ok(SecureMemRequest::_UnknownMethod {
15841                            ordinal: header.ordinal,
15842                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
15843                            method_type: fidl::MethodType::TwoWay,
15844                        })
15845                    }
15846                    _ => Err(fidl::Error::UnknownOrdinal {
15847                        ordinal: header.ordinal,
15848                        protocol_name:
15849                            <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
15850                    }),
15851                }))
15852            },
15853        )
15854    }
15855}
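
// --- Illustrative usage sketch (editorial addition, not fidlgen output). ---
// A server-side example for the securemem driver: pull requests off the
// stream and dispatch on the generated `SecureMemRequest` enum. Replies are
// intentionally omitted because the responder types and their `send` methods
// are generated further below; note that dropping a responder without
// replying (as these placeholder arms do) shuts the channel down, so a real
// driver must answer every two-way request it accepts.
#[allow(dead_code)]
async fn example_serve_secure_mem(
    mut stream: SecureMemRequestStream,
) -> Result<(), fidl::Error> {
    use futures::TryStreamExt as _;
    while let Some(request) = stream.try_next().await? {
        match request {
            SecureMemRequest::GetPhysicalSecureHeaps { responder } => {
                // A real driver reports the TEE-configured heap ranges here.
                let _ = responder;
            }
            SecureMemRequest::_UnknownMethod { ordinal, .. } => {
                // Flexible method from a newer sysmem revision; for two-way
                // calls the stream has already sent the framework error.
                eprintln!("unknown SecureMem method, ordinal {ordinal:#x}");
            }
            other => {
                // Dynamic heaps, range add/delete/modify and zero-sub-range
                // requests would be handled analogously.
                let _ = other;
            }
        }
    }
    Ok(())
}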
15856
15857/// SecureMem
15858///
15859/// The client is sysmem.  The server is the securemem driver.
15860///
15861/// TEE - Trusted Execution Environment.
15862///
15863/// REE - Rich Execution Environment.
15864///
15865/// Enables sysmem to call the securemem driver to get any secure heaps
15866/// configured via the TEE (or via the securemem driver), and set any physical
15867/// secure heaps configured via sysmem.
15868///
15869/// Presently, dynamically-allocated secure heaps are configured via sysmem, as
15870/// it starts quite early during boot and can successfully reserve contiguous
15871/// physical memory.  Presently, fixed-location secure heaps are configured via
15872/// the TEE, as the plumbing goes from the bootloader to the TEE.  However, this
15873/// protocol intentionally doesn't care which heaps are dynamically-allocated
15874/// and which are fixed-location.
15875#[derive(Debug)]
15876pub enum SecureMemRequest {
15877    /// Gets the physical address and length of any secure heap whose physical
15878    /// range is configured via the TEE.
15879    ///
15880    /// Presently, these will be fixed physical addresses and lengths, with the
15881    /// location plumbed via the TEE.
15882    ///
15883    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
15884    /// when there isn't any special heap-specific per-VMO setup or teardown
15885    /// required.
15886    ///
15887    /// The physical range must be secured/protected by the TEE before the
15888    /// securemem driver responds to this request with success.
15889    ///
15890    /// Sysmem should only call this once.  Returning zero heaps is not a
15891    /// failure.
15892    ///
15893    /// Errors:
15894    ///  * PROTOCOL_DEVIATION - called more than once.
15895    ///  * UNSPECIFIED - generic internal error (such as in communication
15896    ///    with TEE which doesn't generate zx_status_t errors).
15897    ///  * other errors are allowed; any other errors should be treated the same
15898    ///    as UNSPECIFIED.
15899    GetPhysicalSecureHeaps { responder: SecureMemGetPhysicalSecureHeapsResponder },
15900    /// Gets information about any secure heaps whose physical pages are not
15901    /// configured by the TEE, but by sysmem.
15902    ///
15903    /// Sysmem should only call this once. Returning zero heaps is not a
15904    /// failure.
15905    ///
15906    /// Errors:
15907    ///  * PROTOCOL_DEVIATION - called more than once.
15908    ///  * UNSPECIFIED - generic internal error (such as in communication
15909    ///    with TEE which doesn't generate zx_status_t errors).
15910    ///  * other errors are allowed; any other errors should be treated the same
15911    ///    as UNSPECIFIED.
15912    GetDynamicSecureHeaps { responder: SecureMemGetDynamicSecureHeapsResponder },
15913    /// This request from sysmem to the securemem driver gets the properties of
15914    /// a protected/secure heap.
15915    ///
15916    /// This only handles heaps with a single contiguous physical extent.
15917    ///
15918    /// The heap's entire physical range is indicated in case this request needs
15919    /// some physical space to auto-detect how many ranges are REE-usable.  Any
15920    /// temporary HW protection ranges will be deleted before this request
15921    /// completes.
15922    ///
15923    /// Errors:
15924    ///  * UNSPECIFIED - generic internal error (such as in communication
15925    ///    with TEE which doesn't generate zx_status_t errors).
15926    ///  * other errors are allowed; any other errors should be treated the same
15927    ///    as UNSPECIFIED.
15928    GetPhysicalSecureHeapProperties {
15929        payload: SecureMemGetPhysicalSecureHeapPropertiesRequest,
15930        responder: SecureMemGetPhysicalSecureHeapPropertiesResponder,
15931    },
15932    /// This request from sysmem to the securemem driver conveys a physical
15933    /// range to add, for a heap whose physical range(s) are set up via
15934    /// sysmem.
15935    ///
15936    /// Only sysmem can call this because only sysmem is handed the client end
15937    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
15938    /// securemem driver is the server end of this protocol.
15939    ///
15940    /// The securemem driver must configure all the covered offsets as protected
15941    /// before responding to this message with success.
15942    ///
15943    /// On failure, the securemem driver must ensure the protected range was not
15944    /// created.
15945    ///
15946    /// Sysmem must call this at most once if dynamic_protection_ranges is
15947    /// false.
15948    ///
15949    /// If dynamic_protection_ranges is true, sysmem can call this multiple
15950    /// times as long as the current number of ranges never exceeds
15951    /// max_protected_range_count.
15952    ///
15953    /// The caller must not attempt to add a range that matches an
15954    /// already-existing range.  Added ranges can overlap each other as long as
15955    /// no two ranges match exactly.
15956    ///
15957    /// Errors:
15958    ///   * PROTOCOL_DEVIATION - called more than once when
15959    ///     !dynamic_protection_ranges.  Adding a heap that would cause overall
15960    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
15961    ///     range that doesn't conform to protected_range_granularity. See log.
15962    ///   * UNSPECIFIED - generic internal error (such as in communication
15963    ///     with TEE which doesn't generate zx_status_t errors).
15964    ///   * other errors are possible, such as from communication failures or
15965    ///     server propagation of failures.
15966    AddSecureHeapPhysicalRange {
15967        payload: SecureMemAddSecureHeapPhysicalRangeRequest,
15968        responder: SecureMemAddSecureHeapPhysicalRangeResponder,
15969    },
15970    /// This request from sysmem to the securemem driver conveys a physical
15971    /// range to delete, for a heap whose physical range(s) are set up via
15972    /// sysmem.
15973    ///
15974    /// Only sysmem can call this because only sysmem is handed the client end
15975    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
15976    /// securemem driver is the server end of this protocol.
15977    ///
15978    /// The securemem driver must configure all the covered offsets as not
15979    /// protected before responding to this message with success.
15980    ///
15981    /// On failure, the securemem driver must ensure the protected range was not
15982    /// deleted.
15983    ///
15984    /// Sysmem must not call this if dynamic_protection_ranges is false.
15985    ///
15986    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
15987    /// on various ranges that exist at the time of the call.
15988    ///
15989    /// If any portion of the range being deleted is not also covered by another
15990    /// protected range, then any ongoing DMA to any part of the entire range
15991    /// may be interrupted / may fail, potentially in a way that's disruptive to
15992    /// the entire system (bus lockup or similar, depending on device details).
15993    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
15994    /// any portion of the range being deleted, unless the caller has other
15995    /// active ranges covering every block of the range being deleted.  Ongoing
15996    /// DMA to/from blocks outside the range being deleted is never impacted by
15997    /// the deletion.
15998    ///
15999    /// Errors:
16000    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
16001    ///     Unexpected heap, or range that doesn't conform to
16002    ///     protected_range_granularity.
16003    ///   * UNSPECIFIED - generic internal error (such as in communication
16004    ///     with TEE which doesn't generate zx_status_t errors).
16005    ///   * NOT_FOUND - the specified range is not found.
16006    ///   * other errors are possible, such as from communication failures or
16007    ///     server propagation of failures.
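    ///
    /// The DMA-safety requirement above reduces to a coverage check on the
    /// caller's side: every block of the range being deleted must either have
    /// no ongoing DMA or still be covered by another active range.  A
    /// simplified, purely illustrative check over `(base, length)` pairs (not
    /// part of this crate):
    ///
    /// ```ignore
    /// /// Returns true if `doomed` is fully covered by the union of `others`,
    /// /// assuming `others` is sorted by base address.
    /// fn fully_covered(doomed: (u64, u64), others: &[(u64, u64)]) -> bool {
    ///     let (mut cursor, end) = (doomed.0, doomed.0 + doomed.1);
    ///     for &(base, len) in others {
    ///         if base > cursor {
    ///             break; // uncovered gap that later (higher) ranges cannot fill
    ///         }
    ///         cursor = cursor.max(base + len);
    ///         if cursor >= end {
    ///             return true;
    ///         }
    ///     }
    ///     cursor >= end
    /// }
    /// ```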
16008    DeleteSecureHeapPhysicalRange {
16009        payload: SecureMemDeleteSecureHeapPhysicalRangeRequest,
16010        responder: SecureMemDeleteSecureHeapPhysicalRangeResponder,
16011    },
16012    /// This request from sysmem to the securemem driver conveys a physical
16013    /// range to modify and its new base and length, for a heap whose physical
16014    /// range(s) are set up via sysmem.
16015    ///
16016    /// Only sysmem can call this because only sysmem is handed the client end
16017    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
16018    /// securemem driver is the server end of this protocol.
16019    ///
16020    /// The securemem driver must configure the range to cover only the new
16021    /// offsets before responding to this message with success.
16022    ///
16023    /// On failure, the securemem driver must ensure the range was not changed.
16024    ///
16025    /// Sysmem must not call this if dynamic_protection_ranges is false.  Sysmem
16026    /// must not call this if !is_mod_protected_range_available.
16027    ///
16028    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
16029    /// on various ranges that exist at the time of the call.
16030    ///
16031    /// The range must be modified at only one end (either the begin or the
16032    /// end), not both.  If the range is getting shorter and the newly
16033    /// un-covered blocks are not covered by other active ranges, any ongoing
16034    /// DMA to the entire shrinking range may fail in a way that disrupts the
16035    /// entire system (bus lockup or similar).  The caller must therefore
16036    /// ensure that no DMA is ongoing to any portion of a range that is
16037    /// getting shorter, unless every block being un-covered by the
16038    /// modification is still covered by another active range, in which case
16039    /// ongoing DMA is not disrupted.
16040    ///
16041    /// If a range is modified to become <= zero length, the range is deleted.
16042    ///
16043    /// Errors:
16044    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
16045    ///     Unexpected heap, or old_range or new_range that doesn't conform to
16046    ///     protected_range_granularity, or old_range and new_range differ in
16047    ///     both begin and end (disallowed).
16048    ///   * UNSPECIFIED - generic internal error (such as in communication
16049    ///     with TEE which doesn't generate zx_status_t errors).
16050    ///   * NOT_FOUND - the specified range is not found.
16051    ///   * other errors are possible, such as from communication failures or
16052    ///     server propagation of failures.
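    ///
    /// The "modify only one end" rule can be checked before issuing the
    /// request.  A purely illustrative helper over `(begin, end)` pairs (not
    /// part of this crate):
    ///
    /// ```ignore
    /// /// A modification is well-formed if at most one endpoint moves
    /// /// (shrinking to zero length acts as a delete).
    /// fn is_valid_modification(old: (u64, u64), new: (u64, u64)) -> bool {
    ///     let begin_moved = old.0 != new.0;
    ///     let end_moved = old.1 != new.1;
    ///     // Moving both endpoints is a PROTOCOL_DEVIATION per the list above.
    ///     !(begin_moved && end_moved)
    /// }
    /// ```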
16053    ModifySecureHeapPhysicalRange {
16054        payload: SecureMemModifySecureHeapPhysicalRangeRequest,
16055        responder: SecureMemModifySecureHeapPhysicalRangeResponder,
16056    },
16057    /// Zero a sub-range of a currently-existing physical range added via
16058    /// AddSecureHeapPhysicalRange().  The sub-range must be fully covered by
16059    /// exactly one physical range, and must not overlap with any other
16060    /// physical range.
16061    ///
16062    /// is_covering_range_explicit - When true, the covering range must be one
16063    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
16064    ///     possibly modified since.  When false, the covering range must not
16065    ///     be one of the ranges explicitly created via
16066    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
16067    ///     a covering range not created via AddSecureHeapPhysicalRange().  The
16068    ///     covering range is typically the entire physical range (or a range
16069    ///     which covers even more) of a heap configured by the TEE and whose
16070    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
16071    ///
16072    /// Ongoing DMA is not disrupted by this request.
16073    ///
16074    /// Errors:
16075    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
16076    ///     Unexpected heap.
16077    ///   * UNSPECIFIED - generic internal error (such as in communication
16078    ///     with TEE which doesn't generate zx_status_t errors).
16079    ///   * other errors are possible, such as from communication failures or
16080    ///     server propagation of failures.
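    ///
    /// A minimal driver-side sketch; the `is_covering_range_explicit` field
    /// access assumes the request-table field named above, and the two
    /// `zero_in_*` helpers are hypothetical (not part of this crate):
    ///
    /// ```ignore
    /// fn handle_zero(request: SecureMemRequest) -> Result<(), fidl::Error> {
    ///     if let Some((payload, responder)) = request.into_zero_sub_range() {
    ///         let result = if payload.is_covering_range_explicit.unwrap_or(false) {
    ///             zero_in_explicit_range(&payload)
    ///         } else {
    ///             zero_in_tee_configured_range(&payload)
    ///         };
    ///         responder.send(result)?;
    ///     }
    ///     Ok(())
    /// }
    /// ```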
16081    ZeroSubRange {
16082        payload: SecureMemZeroSubRangeRequest,
16083        responder: SecureMemZeroSubRangeResponder,
16084    },
16085    /// An interaction was received which does not match any known method.
16086    #[non_exhaustive]
16087    _UnknownMethod {
16088        /// Ordinal of the method that was called.
16089        ordinal: u64,
16090        control_handle: SecureMemControlHandle,
16091        method_type: fidl::MethodType,
16092    },
16093}
16094
16095impl SecureMemRequest {
16096    #[allow(irrefutable_let_patterns)]
16097    pub fn into_get_physical_secure_heaps(
16098        self,
16099    ) -> Option<SecureMemGetPhysicalSecureHeapsResponder> {
16100        if let SecureMemRequest::GetPhysicalSecureHeaps { responder } = self {
16101            Some(responder)
16102        } else {
16103            None
16104        }
16105    }
16106
16107    #[allow(irrefutable_let_patterns)]
16108    pub fn into_get_dynamic_secure_heaps(
16109        self,
16110    ) -> Option<SecureMemGetDynamicSecureHeapsResponder> {
16111        if let SecureMemRequest::GetDynamicSecureHeaps { responder } = self {
16112            Some(responder)
16113        } else {
16114            None
16115        }
16116    }
16117
16118    #[allow(irrefutable_let_patterns)]
16119    pub fn into_get_physical_secure_heap_properties(
16120        self,
16121    ) -> Option<(
16122        SecureMemGetPhysicalSecureHeapPropertiesRequest,
16123        SecureMemGetPhysicalSecureHeapPropertiesResponder,
16124    )> {
16125        if let SecureMemRequest::GetPhysicalSecureHeapProperties { payload, responder } = self {
16126            Some((payload, responder))
16127        } else {
16128            None
16129        }
16130    }
16131
16132    #[allow(irrefutable_let_patterns)]
16133    pub fn into_add_secure_heap_physical_range(
16134        self,
16135    ) -> Option<(
16136        SecureMemAddSecureHeapPhysicalRangeRequest,
16137        SecureMemAddSecureHeapPhysicalRangeResponder,
16138    )> {
16139        if let SecureMemRequest::AddSecureHeapPhysicalRange { payload, responder } = self {
16140            Some((payload, responder))
16141        } else {
16142            None
16143        }
16144    }
16145
16146    #[allow(irrefutable_let_patterns)]
16147    pub fn into_delete_secure_heap_physical_range(
16148        self,
16149    ) -> Option<(
16150        SecureMemDeleteSecureHeapPhysicalRangeRequest,
16151        SecureMemDeleteSecureHeapPhysicalRangeResponder,
16152    )> {
16153        if let SecureMemRequest::DeleteSecureHeapPhysicalRange { payload, responder } = self {
16154            Some((payload, responder))
16155        } else {
16156            None
16157        }
16158    }
16159
16160    #[allow(irrefutable_let_patterns)]
16161    pub fn into_modify_secure_heap_physical_range(
16162        self,
16163    ) -> Option<(
16164        SecureMemModifySecureHeapPhysicalRangeRequest,
16165        SecureMemModifySecureHeapPhysicalRangeResponder,
16166    )> {
16167        if let SecureMemRequest::ModifySecureHeapPhysicalRange { payload, responder } = self {
16168            Some((payload, responder))
16169        } else {
16170            None
16171        }
16172    }
16173
16174    #[allow(irrefutable_let_patterns)]
16175    pub fn into_zero_sub_range(
16176        self,
16177    ) -> Option<(SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResponder)> {
16178        if let SecureMemRequest::ZeroSubRange { payload, responder } = self {
16179            Some((payload, responder))
16180        } else {
16181            None
16182        }
16183    }
16184
16185    /// Name of the method defined in FIDL
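    ///
    /// A sketch of a dispatch loop that uses this for logging, assuming the
    /// `SecureMemRequestStream` type generated for this protocol; only the
    /// unknown-method arm is spelled out:
    ///
    /// ```ignore
    /// use futures::TryStreamExt;
    ///
    /// async fn serve(mut stream: SecureMemRequestStream) -> Result<(), fidl::Error> {
    ///     while let Some(request) = stream.try_next().await? {
    ///         let name = request.method_name();
    ///         match request {
    ///             SecureMemRequest::_UnknownMethod { ordinal, .. } => {
    ///                 // Flexible methods this server doesn't know are logged and ignored.
    ///                 eprintln!("unknown SecureMem method {name} (ordinal {ordinal:#x})");
    ///             }
    ///             _ => { /* dispatch the known variants via their responders here */ }
    ///         }
    ///     }
    ///     Ok(())
    /// }
    /// ```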
16186    pub fn method_name(&self) -> &'static str {
16187        match *self {
16188            SecureMemRequest::GetPhysicalSecureHeaps { .. } => "get_physical_secure_heaps",
16189            SecureMemRequest::GetDynamicSecureHeaps { .. } => "get_dynamic_secure_heaps",
16190            SecureMemRequest::GetPhysicalSecureHeapProperties { .. } => {
16191                "get_physical_secure_heap_properties"
16192            }
16193            SecureMemRequest::AddSecureHeapPhysicalRange { .. } => "add_secure_heap_physical_range",
16194            SecureMemRequest::DeleteSecureHeapPhysicalRange { .. } => {
16195                "delete_secure_heap_physical_range"
16196            }
16197            SecureMemRequest::ModifySecureHeapPhysicalRange { .. } => {
16198                "modify_secure_heap_physical_range"
16199            }
16200            SecureMemRequest::ZeroSubRange { .. } => "zero_sub_range",
16201            SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
16202                "unknown one-way method"
16203            }
16204            SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
16205                "unknown two-way method"
16206            }
16207        }
16208    }
16209}
16210
16211#[derive(Debug, Clone)]
16212pub struct SecureMemControlHandle {
16213    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
16214}
16215
16216impl fidl::endpoints::ControlHandle for SecureMemControlHandle {
16217    fn shutdown(&self) {
16218        self.inner.shutdown()
16219    }
16220    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
16221        self.inner.shutdown_with_epitaph(status)
16222    }
16223
16224    fn is_closed(&self) -> bool {
16225        self.inner.channel().is_closed()
16226    }
16227    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
16228        self.inner.channel().on_closed()
16229    }
16230
16231    #[cfg(target_os = "fuchsia")]
16232    fn signal_peer(
16233        &self,
16234        clear_mask: zx::Signals,
16235        set_mask: zx::Signals,
16236    ) -> Result<(), zx_status::Status> {
16237        use fidl::Peered;
16238        self.inner.channel().signal_peer(clear_mask, set_mask)
16239    }
16240}
16241
16242impl SecureMemControlHandle {}
16243
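/// Responder for the two-way `GetPhysicalSecureHeaps` method.
///
/// Exactly one of `send`, `send_no_shutdown_on_err`, or
/// `drop_without_shutdown` should be used; dropping the responder without
/// replying shuts the channel down.  A minimal sketch, assuming a
/// hypothetical `lookup_heaps` helper (not part of this crate):
///
/// ```ignore
/// fn reply(responder: SecureMemGetPhysicalSecureHeapsResponder) -> Result<(), fidl::Error> {
///     match lookup_heaps() {
///         Ok(response) => responder.send(Ok(&response)),
///         // Report the error without tearing the channel down on a send failure.
///         Err(e) => responder.send_no_shutdown_on_err(Err(e)),
///     }
/// }
/// ```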
16244#[must_use = "FIDL methods require a response to be sent"]
16245#[derive(Debug)]
16246pub struct SecureMemGetPhysicalSecureHeapsResponder {
16247    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
16248    tx_id: u32,
16249}
16250
16251/// Sets the channel to shut down (see [`SecureMemControlHandle::shutdown`])
16252/// if the responder is dropped without sending a response, so that the client
16253/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
16254impl std::ops::Drop for SecureMemGetPhysicalSecureHeapsResponder {
16255    fn drop(&mut self) {
16256        self.control_handle.shutdown();
16257        // Safety: drops once, never accessed again
16258        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16259    }
16260}
16261
16262impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapsResponder {
16263    type ControlHandle = SecureMemControlHandle;
16264
16265    fn control_handle(&self) -> &SecureMemControlHandle {
16266        &self.control_handle
16267    }
16268
16269    fn drop_without_shutdown(mut self) {
16270        // Safety: drops once, never accessed again due to mem::forget
16271        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16272        // Prevent Drop from running (which would shut down the channel)
16273        std::mem::forget(self);
16274    }
16275}
16276
16277impl SecureMemGetPhysicalSecureHeapsResponder {
16278    /// Sends a response to the FIDL transaction.
16279    ///
16280    /// Sets the channel to shut down if an error occurs.
16281    pub fn send(
16282        self,
16283        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
16284    ) -> Result<(), fidl::Error> {
16285        let _result = self.send_raw(result);
16286        if _result.is_err() {
16287            self.control_handle.shutdown();
16288        }
16289        self.drop_without_shutdown();
16290        _result
16291    }
16292
16293    /// Similar to "send" but does not shut down the channel if an error occurs.
16294    pub fn send_no_shutdown_on_err(
16295        self,
16296        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
16297    ) -> Result<(), fidl::Error> {
16298        let _result = self.send_raw(result);
16299        self.drop_without_shutdown();
16300        _result
16301    }
16302
16303    fn send_raw(
16304        &self,
16305        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
16306    ) -> Result<(), fidl::Error> {
16307        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16308            SecureMemGetPhysicalSecureHeapsResponse,
16309            Error,
16310        >>(
16311            fidl::encoding::FlexibleResult::new(result),
16312            self.tx_id,
16313            0x38716300592073e3,
16314            fidl::encoding::DynamicFlags::FLEXIBLE,
16315        )
16316    }
16317}
16318
16319#[must_use = "FIDL methods require a response to be sent"]
16320#[derive(Debug)]
16321pub struct SecureMemGetDynamicSecureHeapsResponder {
16322    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
16323    tx_id: u32,
16324}
16325
16326/// Sets the channel to shut down (see [`SecureMemControlHandle::shutdown`])
16327/// if the responder is dropped without sending a response, so that the client
16328/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
16329impl std::ops::Drop for SecureMemGetDynamicSecureHeapsResponder {
16330    fn drop(&mut self) {
16331        self.control_handle.shutdown();
16332        // Safety: drops once, never accessed again
16333        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16334    }
16335}
16336
16337impl fidl::endpoints::Responder for SecureMemGetDynamicSecureHeapsResponder {
16338    type ControlHandle = SecureMemControlHandle;
16339
16340    fn control_handle(&self) -> &SecureMemControlHandle {
16341        &self.control_handle
16342    }
16343
16344    fn drop_without_shutdown(mut self) {
16345        // Safety: drops once, never accessed again due to mem::forget
16346        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16347        // Prevent Drop from running (which would shut down the channel)
16348        std::mem::forget(self);
16349    }
16350}
16351
16352impl SecureMemGetDynamicSecureHeapsResponder {
16353    /// Sends a response to the FIDL transaction.
16354    ///
16355    /// Sets the channel to shut down if an error occurs.
16356    pub fn send(
16357        self,
16358        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
16359    ) -> Result<(), fidl::Error> {
16360        let _result = self.send_raw(result);
16361        if _result.is_err() {
16362            self.control_handle.shutdown();
16363        }
16364        self.drop_without_shutdown();
16365        _result
16366    }
16367
16368    /// Similar to "send" but does not shut down the channel if an error occurs.
16369    pub fn send_no_shutdown_on_err(
16370        self,
16371        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
16372    ) -> Result<(), fidl::Error> {
16373        let _result = self.send_raw(result);
16374        self.drop_without_shutdown();
16375        _result
16376    }
16377
16378    fn send_raw(
16379        &self,
16380        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
16381    ) -> Result<(), fidl::Error> {
16382        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16383            SecureMemGetDynamicSecureHeapsResponse,
16384            Error,
16385        >>(
16386            fidl::encoding::FlexibleResult::new(result),
16387            self.tx_id,
16388            0x1190847f99952834,
16389            fidl::encoding::DynamicFlags::FLEXIBLE,
16390        )
16391    }
16392}
16393
16394#[must_use = "FIDL methods require a response to be sent"]
16395#[derive(Debug)]
16396pub struct SecureMemGetPhysicalSecureHeapPropertiesResponder {
16397    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
16398    tx_id: u32,
16399}
16400
16401/// Sets the channel to shut down (see [`SecureMemControlHandle::shutdown`])
16402/// if the responder is dropped without sending a response, so that the client
16403/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
16404impl std::ops::Drop for SecureMemGetPhysicalSecureHeapPropertiesResponder {
16405    fn drop(&mut self) {
16406        self.control_handle.shutdown();
16407        // Safety: drops once, never accessed again
16408        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16409    }
16410}
16411
16412impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapPropertiesResponder {
16413    type ControlHandle = SecureMemControlHandle;
16414
16415    fn control_handle(&self) -> &SecureMemControlHandle {
16416        &self.control_handle
16417    }
16418
16419    fn drop_without_shutdown(mut self) {
16420        // Safety: drops once, never accessed again due to mem::forget
16421        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16422        // Prevent Drop from running (which would shut down the channel)
16423        std::mem::forget(self);
16424    }
16425}
16426
16427impl SecureMemGetPhysicalSecureHeapPropertiesResponder {
16428    /// Sends a response to the FIDL transaction.
16429    ///
16430    /// Sets the channel to shut down if an error occurs.
16431    pub fn send(
16432        self,
16433        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
16434    ) -> Result<(), fidl::Error> {
16435        let _result = self.send_raw(result);
16436        if _result.is_err() {
16437            self.control_handle.shutdown();
16438        }
16439        self.drop_without_shutdown();
16440        _result
16441    }
16442
16443    /// Similar to "send" but does not shut down the channel if an error occurs.
16444    pub fn send_no_shutdown_on_err(
16445        self,
16446        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
16447    ) -> Result<(), fidl::Error> {
16448        let _result = self.send_raw(result);
16449        self.drop_without_shutdown();
16450        _result
16451    }
16452
16453    fn send_raw(
16454        &self,
16455        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
16456    ) -> Result<(), fidl::Error> {
16457        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16458            SecureMemGetPhysicalSecureHeapPropertiesResponse,
16459            Error,
16460        >>(
16461            fidl::encoding::FlexibleResult::new(result),
16462            self.tx_id,
16463            0xc6f06889009c7bc,
16464            fidl::encoding::DynamicFlags::FLEXIBLE,
16465        )
16466    }
16467}
16468
16469#[must_use = "FIDL methods require a response to be sent"]
16470#[derive(Debug)]
16471pub struct SecureMemAddSecureHeapPhysicalRangeResponder {
16472    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
16473    tx_id: u32,
16474}
16475
16476/// Sets the channel to shut down (see [`SecureMemControlHandle::shutdown`])
16477/// if the responder is dropped without sending a response, so that the client
16478/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
16479impl std::ops::Drop for SecureMemAddSecureHeapPhysicalRangeResponder {
16480    fn drop(&mut self) {
16481        self.control_handle.shutdown();
16482        // Safety: drops once, never accessed again
16483        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16484    }
16485}
16486
16487impl fidl::endpoints::Responder for SecureMemAddSecureHeapPhysicalRangeResponder {
16488    type ControlHandle = SecureMemControlHandle;
16489
16490    fn control_handle(&self) -> &SecureMemControlHandle {
16491        &self.control_handle
16492    }
16493
16494    fn drop_without_shutdown(mut self) {
16495        // Safety: drops once, never accessed again due to mem::forget
16496        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16497        // Prevent Drop from running (which would shut down the channel)
16498        std::mem::forget(self);
16499    }
16500}
16501
16502impl SecureMemAddSecureHeapPhysicalRangeResponder {
16503    /// Sends a response to the FIDL transaction.
16504    ///
16505    /// Sets the channel to shut down if an error occurs.
16506    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16507        let _result = self.send_raw(result);
16508        if _result.is_err() {
16509            self.control_handle.shutdown();
16510        }
16511        self.drop_without_shutdown();
16512        _result
16513    }
16514
16515    /// Similar to "send" but does not shut down the channel if an error occurs.
16516    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16517        let _result = self.send_raw(result);
16518        self.drop_without_shutdown();
16519        _result
16520    }
16521
16522    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16523        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16524            fidl::encoding::EmptyStruct,
16525            Error,
16526        >>(
16527            fidl::encoding::FlexibleResult::new(result),
16528            self.tx_id,
16529            0x35f695b9b6c7217a,
16530            fidl::encoding::DynamicFlags::FLEXIBLE,
16531        )
16532    }
16533}
16534
16535#[must_use = "FIDL methods require a response to be sent"]
16536#[derive(Debug)]
16537pub struct SecureMemDeleteSecureHeapPhysicalRangeResponder {
16538    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
16539    tx_id: u32,
16540}
16541
16542/// Sets the channel to shut down (see [`SecureMemControlHandle::shutdown`])
16543/// if the responder is dropped without sending a response, so that the client
16544/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
16545impl std::ops::Drop for SecureMemDeleteSecureHeapPhysicalRangeResponder {
16546    fn drop(&mut self) {
16547        self.control_handle.shutdown();
16548        // Safety: drops once, never accessed again
16549        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16550    }
16551}
16552
16553impl fidl::endpoints::Responder for SecureMemDeleteSecureHeapPhysicalRangeResponder {
16554    type ControlHandle = SecureMemControlHandle;
16555
16556    fn control_handle(&self) -> &SecureMemControlHandle {
16557        &self.control_handle
16558    }
16559
16560    fn drop_without_shutdown(mut self) {
16561        // Safety: drops once, never accessed again due to mem::forget
16562        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16563        // Prevent Drop from running (which would shut down the channel)
16564        std::mem::forget(self);
16565    }
16566}
16567
16568impl SecureMemDeleteSecureHeapPhysicalRangeResponder {
16569    /// Sends a response to the FIDL transaction.
16570    ///
16571    /// Sets the channel to shut down if an error occurs.
16572    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16573        let _result = self.send_raw(result);
16574        if _result.is_err() {
16575            self.control_handle.shutdown();
16576        }
16577        self.drop_without_shutdown();
16578        _result
16579    }
16580
16581    /// Similar to "send" but does not shut down the channel if an error occurs.
16582    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16583        let _result = self.send_raw(result);
16584        self.drop_without_shutdown();
16585        _result
16586    }
16587
16588    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16589        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16590            fidl::encoding::EmptyStruct,
16591            Error,
16592        >>(
16593            fidl::encoding::FlexibleResult::new(result),
16594            self.tx_id,
16595            0xeaa58c650264c9e,
16596            fidl::encoding::DynamicFlags::FLEXIBLE,
16597        )
16598    }
16599}
16600
16601#[must_use = "FIDL methods require a response to be sent"]
16602#[derive(Debug)]
16603pub struct SecureMemModifySecureHeapPhysicalRangeResponder {
16604    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
16605    tx_id: u32,
16606}
16607
16608/// Sets the channel to shut down (see [`SecureMemControlHandle::shutdown`])
16609/// if the responder is dropped without sending a response, so that the client
16610/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
16611impl std::ops::Drop for SecureMemModifySecureHeapPhysicalRangeResponder {
16612    fn drop(&mut self) {
16613        self.control_handle.shutdown();
16614        // Safety: drops once, never accessed again
16615        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16616    }
16617}
16618
16619impl fidl::endpoints::Responder for SecureMemModifySecureHeapPhysicalRangeResponder {
16620    type ControlHandle = SecureMemControlHandle;
16621
16622    fn control_handle(&self) -> &SecureMemControlHandle {
16623        &self.control_handle
16624    }
16625
16626    fn drop_without_shutdown(mut self) {
16627        // Safety: drops once, never accessed again due to mem::forget
16628        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16629        // Prevent Drop from running (which would shut down the channel)
16630        std::mem::forget(self);
16631    }
16632}
16633
16634impl SecureMemModifySecureHeapPhysicalRangeResponder {
16635    /// Sends a response to the FIDL transaction.
16636    ///
16637    /// Sets the channel to shut down if an error occurs.
16638    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16639        let _result = self.send_raw(result);
16640        if _result.is_err() {
16641            self.control_handle.shutdown();
16642        }
16643        self.drop_without_shutdown();
16644        _result
16645    }
16646
16647    /// Similar to "send" but does not shut down the channel if an error occurs.
16648    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16649        let _result = self.send_raw(result);
16650        self.drop_without_shutdown();
16651        _result
16652    }
16653
16654    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16655        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16656            fidl::encoding::EmptyStruct,
16657            Error,
16658        >>(
16659            fidl::encoding::FlexibleResult::new(result),
16660            self.tx_id,
16661            0x60b7448aa1187734,
16662            fidl::encoding::DynamicFlags::FLEXIBLE,
16663        )
16664    }
16665}
16666
16667#[must_use = "FIDL methods require a response to be sent"]
16668#[derive(Debug)]
16669pub struct SecureMemZeroSubRangeResponder {
16670    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
16671    tx_id: u32,
16672}
16673
16674/// Sets the channel to shut down (see [`SecureMemControlHandle::shutdown`])
16675/// if the responder is dropped without sending a response, so that the client
16676/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
16677impl std::ops::Drop for SecureMemZeroSubRangeResponder {
16678    fn drop(&mut self) {
16679        self.control_handle.shutdown();
16680        // Safety: drops once, never accessed again
16681        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16682    }
16683}
16684
16685impl fidl::endpoints::Responder for SecureMemZeroSubRangeResponder {
16686    type ControlHandle = SecureMemControlHandle;
16687
16688    fn control_handle(&self) -> &SecureMemControlHandle {
16689        &self.control_handle
16690    }
16691
16692    fn drop_without_shutdown(mut self) {
16693        // Safety: drops once, never accessed again due to mem::forget
16694        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
16695        // Prevent Drop from running (which would shut down the channel)
16696        std::mem::forget(self);
16697    }
16698}
16699
16700impl SecureMemZeroSubRangeResponder {
16701    /// Sends a response to the FIDL transaction.
16702    ///
16703    /// Sets the channel to shut down if an error occurs.
16704    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16705        let _result = self.send_raw(result);
16706        if _result.is_err() {
16707            self.control_handle.shutdown();
16708        }
16709        self.drop_without_shutdown();
16710        _result
16711    }
16712
16713    /// Similar to "send" but does not shut down the channel if an error occurs.
16714    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16715        let _result = self.send_raw(result);
16716        self.drop_without_shutdown();
16717        _result
16718    }
16719
16720    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16721        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16722            fidl::encoding::EmptyStruct,
16723            Error,
16724        >>(
16725            fidl::encoding::FlexibleResult::new(result),
16726            self.tx_id,
16727            0x5b25b7901a385ce5,
16728            fidl::encoding::DynamicFlags::FLEXIBLE,
16729        )
16730    }
16731}
16732
16733mod internal {
16734    use super::*;
16735
16736    impl AllocatorAllocateNonSharedCollectionRequest {
16737        #[inline(always)]
16738        fn max_ordinal_present(&self) -> u64 {
16739            if let Some(_) = self.collection_request {
16740                return 1;
16741            }
16742            0
16743        }
16744    }
16745
16746    impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateNonSharedCollectionRequest {
16747        type Borrowed<'a> = &'a mut Self;
16748        fn take_or_borrow<'a>(
16749            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
16750        ) -> Self::Borrowed<'a> {
16751            value
16752        }
16753    }
16754
16755    unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateNonSharedCollectionRequest {
16756        type Owned = Self;
16757
16758        #[inline(always)]
16759        fn inline_align(_context: fidl::encoding::Context) -> usize {
16760            8
16761        }
16762
16763        #[inline(always)]
16764        fn inline_size(_context: fidl::encoding::Context) -> usize {
16765            16
16766        }
16767    }
16768
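    // Note on the wire format handled by the table Encode/Decode impls in this
    // module: a FIDL table's 16-byte inline part mirrors a vector header (the
    // max present ordinal in the first 8 bytes, an ALLOC_PRESENT marker in the
    // second 8 bytes), and its out-of-line part is one 8-byte envelope per
    // ordinal.  Values whose inline size is 4 bytes or fewer (including
    // handles and endpoints) are stored directly in the envelope; larger
    // values go out of line, as the envelope checks in the decoders verify.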
16769    unsafe impl
16770        fidl::encoding::Encode<
16771            AllocatorAllocateNonSharedCollectionRequest,
16772            fidl::encoding::DefaultFuchsiaResourceDialect,
16773        > for &mut AllocatorAllocateNonSharedCollectionRequest
16774    {
16775        unsafe fn encode(
16776            self,
16777            encoder: &mut fidl::encoding::Encoder<
16778                '_,
16779                fidl::encoding::DefaultFuchsiaResourceDialect,
16780            >,
16781            offset: usize,
16782            mut depth: fidl::encoding::Depth,
16783        ) -> fidl::Result<()> {
16784            encoder.debug_check_bounds::<AllocatorAllocateNonSharedCollectionRequest>(offset);
16785            // Table header (same wire layout as a vector header).
16786            let max_ordinal: u64 = self.max_ordinal_present();
16787            encoder.write_num(max_ordinal, offset);
16788            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
16789            // Calling encoder.out_of_line_offset(0) is not allowed.
16790            if max_ordinal == 0 {
16791                return Ok(());
16792            }
16793            depth.increment()?;
16794            let envelope_size = 8;
16795            let bytes_len = max_ordinal as usize * envelope_size;
16796            #[allow(unused_variables)]
16797            let offset = encoder.out_of_line_offset(bytes_len);
16798            let mut _prev_end_offset: usize = 0;
16799            if 1 > max_ordinal {
16800                return Ok(());
16801            }
16802
16803            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
16804            // are envelope_size bytes.
16805            let cur_offset: usize = (1 - 1) * envelope_size;
16806
16807            // Zero reserved fields.
16808            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
16809
16810            // Safety:
16811            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
16812            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
16813            //   envelope_size bytes, there is always sufficient room.
16814            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
16815            self.collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
16816            encoder, offset + cur_offset, depth
16817        )?;
16818
16819            _prev_end_offset = cur_offset + envelope_size;
16820
16821            Ok(())
16822        }
16823    }
16824
16825    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
16826        for AllocatorAllocateNonSharedCollectionRequest
16827    {
16828        #[inline(always)]
16829        fn new_empty() -> Self {
16830            Self::default()
16831        }
16832
16833        unsafe fn decode(
16834            &mut self,
16835            decoder: &mut fidl::encoding::Decoder<
16836                '_,
16837                fidl::encoding::DefaultFuchsiaResourceDialect,
16838            >,
16839            offset: usize,
16840            mut depth: fidl::encoding::Depth,
16841        ) -> fidl::Result<()> {
16842            decoder.debug_check_bounds::<Self>(offset);
16843            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
16844                None => return Err(fidl::Error::NotNullable),
16845                Some(len) => len,
16846            };
16847            // Calling decoder.out_of_line_offset(0) is not allowed.
16848            if len == 0 {
16849                return Ok(());
16850            };
16851            depth.increment()?;
16852            let envelope_size = 8;
16853            let bytes_len = len * envelope_size;
16854            let offset = decoder.out_of_line_offset(bytes_len)?;
16855            // Decode the envelope for each type.
16856            let mut _next_ordinal_to_read = 0;
16857            let mut next_offset = offset;
16858            let end_offset = offset + bytes_len;
16859            _next_ordinal_to_read += 1;
16860            if next_offset >= end_offset {
16861                return Ok(());
16862            }
16863
16864            // Decode unknown envelopes for gaps in ordinals.
16865            while _next_ordinal_to_read < 1 {
16866                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
16867                _next_ordinal_to_read += 1;
16868                next_offset += envelope_size;
16869            }
16870
16871            let next_out_of_line = decoder.next_out_of_line();
16872            let handles_before = decoder.remaining_handles();
16873            if let Some((inlined, num_bytes, num_handles)) =
16874                fidl::encoding::decode_envelope_header(decoder, next_offset)?
16875            {
16876                let member_inline_size = <fidl::encoding::Endpoint<
16877                    fidl::endpoints::ServerEnd<BufferCollectionMarker>,
16878                > as fidl::encoding::TypeMarker>::inline_size(
16879                    decoder.context
16880                );
16881                if inlined != (member_inline_size <= 4) {
16882                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
16883                }
16884                let inner_offset;
16885                let mut inner_depth = depth.clone();
16886                if inlined {
16887                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
16888                    inner_offset = next_offset;
16889                } else {
16890                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
16891                    inner_depth.increment()?;
16892                }
16893                let val_ref = self.collection_request.get_or_insert_with(|| {
16894                    fidl::new_empty!(
16895                        fidl::encoding::Endpoint<
16896                            fidl::endpoints::ServerEnd<BufferCollectionMarker>,
16897                        >,
16898                        fidl::encoding::DefaultFuchsiaResourceDialect
16899                    )
16900                });
16901                fidl::decode!(
16902                    fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
16903                    fidl::encoding::DefaultFuchsiaResourceDialect,
16904                    val_ref,
16905                    decoder,
16906                    inner_offset,
16907                    inner_depth
16908                )?;
16909                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
16910                {
16911                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
16912                }
16913                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
16914                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
16915                }
16916            }
16917
16918            next_offset += envelope_size;
16919
16920            // Decode the remaining unknown envelopes.
16921            while next_offset < end_offset {
16922                _next_ordinal_to_read += 1;
16923                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
16924                next_offset += envelope_size;
16925            }
16926
16927            Ok(())
16928        }
16929    }
16930
16931    impl AllocatorAllocateSharedCollectionRequest {
16932        #[inline(always)]
16933        fn max_ordinal_present(&self) -> u64 {
16934            if let Some(_) = self.token_request {
16935                return 1;
16936            }
16937            0
16938        }
16939    }
16940
16941    impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateSharedCollectionRequest {
16942        type Borrowed<'a> = &'a mut Self;
16943        fn take_or_borrow<'a>(
16944            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
16945        ) -> Self::Borrowed<'a> {
16946            value
16947        }
16948    }
16949
16950    unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateSharedCollectionRequest {
16951        type Owned = Self;
16952
16953        #[inline(always)]
16954        fn inline_align(_context: fidl::encoding::Context) -> usize {
16955            8
16956        }
16957
16958        #[inline(always)]
16959        fn inline_size(_context: fidl::encoding::Context) -> usize {
16960            16
16961        }
16962    }
16963
16964    unsafe impl
16965        fidl::encoding::Encode<
16966            AllocatorAllocateSharedCollectionRequest,
16967            fidl::encoding::DefaultFuchsiaResourceDialect,
16968        > for &mut AllocatorAllocateSharedCollectionRequest
16969    {
16970        unsafe fn encode(
16971            self,
16972            encoder: &mut fidl::encoding::Encoder<
16973                '_,
16974                fidl::encoding::DefaultFuchsiaResourceDialect,
16975            >,
16976            offset: usize,
16977            mut depth: fidl::encoding::Depth,
16978        ) -> fidl::Result<()> {
16979            encoder.debug_check_bounds::<AllocatorAllocateSharedCollectionRequest>(offset);
16980            // Table header (same wire layout as a vector header).
16981            let max_ordinal: u64 = self.max_ordinal_present();
16982            encoder.write_num(max_ordinal, offset);
16983            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
16984            // Calling encoder.out_of_line_offset(0) is not allowed.
16985            if max_ordinal == 0 {
16986                return Ok(());
16987            }
16988            depth.increment()?;
16989            let envelope_size = 8;
16990            let bytes_len = max_ordinal as usize * envelope_size;
16991            #[allow(unused_variables)]
16992            let offset = encoder.out_of_line_offset(bytes_len);
16993            let mut _prev_end_offset: usize = 0;
16994            if 1 > max_ordinal {
16995                return Ok(());
16996            }
16997
16998            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
16999            // are envelope_size bytes.
17000            let cur_offset: usize = (1 - 1) * envelope_size;
17001
17002            // Zero reserved fields.
17003            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
17004
17005            // Safety:
17006            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
17007            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
17008            //   envelope_size bytes, there is always sufficient room.
17009            fidl::encoding::encode_in_envelope_optional::<
17010                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
17011                fidl::encoding::DefaultFuchsiaResourceDialect,
17012            >(
17013                self.token_request.as_mut().map(
17014                    <fidl::encoding::Endpoint<
17015                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
17016                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
17017                ),
17018                encoder,
17019                offset + cur_offset,
17020                depth,
17021            )?;
17022
17023            _prev_end_offset = cur_offset + envelope_size;
17024
17025            Ok(())
17026        }
17027    }
17028
17029    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
17030        for AllocatorAllocateSharedCollectionRequest
17031    {
17032        #[inline(always)]
17033        fn new_empty() -> Self {
17034            Self::default()
17035        }
17036
17037        unsafe fn decode(
17038            &mut self,
17039            decoder: &mut fidl::encoding::Decoder<
17040                '_,
17041                fidl::encoding::DefaultFuchsiaResourceDialect,
17042            >,
17043            offset: usize,
17044            mut depth: fidl::encoding::Depth,
17045        ) -> fidl::Result<()> {
17046            decoder.debug_check_bounds::<Self>(offset);
17047            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
17048                None => return Err(fidl::Error::NotNullable),
17049                Some(len) => len,
17050            };
17051            // Calling decoder.out_of_line_offset(0) is not allowed.
17052            if len == 0 {
17053                return Ok(());
17054            };
17055            depth.increment()?;
17056            let envelope_size = 8;
17057            let bytes_len = len * envelope_size;
17058            let offset = decoder.out_of_line_offset(bytes_len)?;
17059            // Decode the envelope for each type.
17060            let mut _next_ordinal_to_read = 0;
17061            let mut next_offset = offset;
17062            let end_offset = offset + bytes_len;
17063            _next_ordinal_to_read += 1;
17064            if next_offset >= end_offset {
17065                return Ok(());
17066            }
17067
17068            // Decode unknown envelopes for gaps in ordinals.
17069            while _next_ordinal_to_read < 1 {
17070                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
17071                _next_ordinal_to_read += 1;
17072                next_offset += envelope_size;
17073            }
17074
17075            let next_out_of_line = decoder.next_out_of_line();
17076            let handles_before = decoder.remaining_handles();
17077            if let Some((inlined, num_bytes, num_handles)) =
17078                fidl::encoding::decode_envelope_header(decoder, next_offset)?
17079            {
17080                let member_inline_size = <fidl::encoding::Endpoint<
17081                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
17082                > as fidl::encoding::TypeMarker>::inline_size(
17083                    decoder.context
17084                );
17085                if inlined != (member_inline_size <= 4) {
17086                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
17087                }
17088                let inner_offset;
17089                let mut inner_depth = depth.clone();
17090                if inlined {
17091                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
17092                    inner_offset = next_offset;
17093                } else {
17094                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
17095                    inner_depth.increment()?;
17096                }
17097                let val_ref = self.token_request.get_or_insert_with(|| {
17098                    fidl::new_empty!(
17099                        fidl::encoding::Endpoint<
17100                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
17101                        >,
17102                        fidl::encoding::DefaultFuchsiaResourceDialect
17103                    )
17104                });
17105                fidl::decode!(
17106                    fidl::encoding::Endpoint<
17107                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
17108                    >,
17109                    fidl::encoding::DefaultFuchsiaResourceDialect,
17110                    val_ref,
17111                    decoder,
17112                    inner_offset,
17113                    inner_depth
17114                )?;
17115                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
17116                {
17117                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
17118                }
17119                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
17120                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
17121                }
17122            }
17123
17124            next_offset += envelope_size;
17125
17126            // Decode the remaining unknown envelopes.
17127            while next_offset < end_offset {
17128                _next_ordinal_to_read += 1;
17129                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
17130                next_offset += envelope_size;
17131            }
17132
17133            Ok(())
17134        }
17135    }
17136
17137    impl AllocatorBindSharedCollectionRequest {
17138        #[inline(always)]
17139        fn max_ordinal_present(&self) -> u64 {
17140            if let Some(_) = self.buffer_collection_request {
17141                return 2;
17142            }
17143            if let Some(_) = self.token {
17144                return 1;
17145            }
17146            0
17147        }
17148    }
17149
17150    impl fidl::encoding::ResourceTypeMarker for AllocatorBindSharedCollectionRequest {
17151        type Borrowed<'a> = &'a mut Self;
17152        fn take_or_borrow<'a>(
17153            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17154        ) -> Self::Borrowed<'a> {
17155            value
17156        }
17157    }
17158
17159    unsafe impl fidl::encoding::TypeMarker for AllocatorBindSharedCollectionRequest {
17160        type Owned = Self;
17161
17162        #[inline(always)]
17163        fn inline_align(_context: fidl::encoding::Context) -> usize {
17164            8
17165        }
17166
17167        #[inline(always)]
17168        fn inline_size(_context: fidl::encoding::Context) -> usize {
17169            16
17170        }
17171    }
17172
17173    unsafe impl
17174        fidl::encoding::Encode<
17175            AllocatorBindSharedCollectionRequest,
17176            fidl::encoding::DefaultFuchsiaResourceDialect,
17177        > for &mut AllocatorBindSharedCollectionRequest
17178    {
17179        unsafe fn encode(
17180            self,
17181            encoder: &mut fidl::encoding::Encoder<
17182                '_,
17183                fidl::encoding::DefaultFuchsiaResourceDialect,
17184            >,
17185            offset: usize,
17186            mut depth: fidl::encoding::Depth,
17187        ) -> fidl::Result<()> {
17188            encoder.debug_check_bounds::<AllocatorBindSharedCollectionRequest>(offset);
17189            // Table header (same wire layout as a vector header).
17190            let max_ordinal: u64 = self.max_ordinal_present();
17191            encoder.write_num(max_ordinal, offset);
17192            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
17193            // Calling encoder.out_of_line_offset(0) is not allowed.
17194            if max_ordinal == 0 {
17195                return Ok(());
17196            }
17197            depth.increment()?;
17198            let envelope_size = 8;
17199            let bytes_len = max_ordinal as usize * envelope_size;
17200            #[allow(unused_variables)]
17201            let offset = encoder.out_of_line_offset(bytes_len);
17202            let mut _prev_end_offset: usize = 0;
17203            if 1 > max_ordinal {
17204                return Ok(());
17205            }
17206
17207            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
17208            // are envelope_size bytes.
17209            let cur_offset: usize = (1 - 1) * envelope_size;
17210
17211            // Zero reserved fields.
17212            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
17213
17214            // Safety:
17215            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
17216            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
17217            //   envelope_size bytes, there is always sufficient room.
17218            fidl::encoding::encode_in_envelope_optional::<
17219                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
17220                fidl::encoding::DefaultFuchsiaResourceDialect,
17221            >(
17222                self.token.as_mut().map(
17223                    <fidl::encoding::Endpoint<
17224                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
17225                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
17226                ),
17227                encoder,
17228                offset + cur_offset,
17229                depth,
17230            )?;
17231
17232            _prev_end_offset = cur_offset + envelope_size;
17233            if 2 > max_ordinal {
17234                return Ok(());
17235            }
17236
17237            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
17238            // are envelope_size bytes.
17239            let cur_offset: usize = (2 - 1) * envelope_size;
17240
17241            // Zero reserved fields.
17242            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
17243
17244            // Safety:
17245            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
17246            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
17247            //   envelope_size bytes, there is always sufficient room.
17248            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
17249            self.buffer_collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
17250            encoder, offset + cur_offset, depth
17251        )?;
17252
17253            _prev_end_offset = cur_offset + envelope_size;
17254
17255            Ok(())
17256        }
17257    }
17258
17259    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
17260        for AllocatorBindSharedCollectionRequest
17261    {
17262        #[inline(always)]
17263        fn new_empty() -> Self {
17264            Self::default()
17265        }
17266
17267        unsafe fn decode(
17268            &mut self,
17269            decoder: &mut fidl::encoding::Decoder<
17270                '_,
17271                fidl::encoding::DefaultFuchsiaResourceDialect,
17272            >,
17273            offset: usize,
17274            mut depth: fidl::encoding::Depth,
17275        ) -> fidl::Result<()> {
17276            decoder.debug_check_bounds::<Self>(offset);
17277            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
17278                None => return Err(fidl::Error::NotNullable),
17279                Some(len) => len,
17280            };
17281            // Calling decoder.out_of_line_offset(0) is not allowed.
17282            if len == 0 {
17283                return Ok(());
17284            };
17285            depth.increment()?;
17286            let envelope_size = 8;
17287            let bytes_len = len * envelope_size;
17288            let offset = decoder.out_of_line_offset(bytes_len)?;
17289            // Decode the envelope for each type.
17290            let mut _next_ordinal_to_read = 0;
17291            let mut next_offset = offset;
17292            let end_offset = offset + bytes_len;
17293            _next_ordinal_to_read += 1;
17294            if next_offset >= end_offset {
17295                return Ok(());
17296            }
17297
17298            // Decode unknown envelopes for gaps in ordinals.
17299            while _next_ordinal_to_read < 1 {
17300                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
17301                _next_ordinal_to_read += 1;
17302                next_offset += envelope_size;
17303            }
17304
17305            let next_out_of_line = decoder.next_out_of_line();
17306            let handles_before = decoder.remaining_handles();
17307            if let Some((inlined, num_bytes, num_handles)) =
17308                fidl::encoding::decode_envelope_header(decoder, next_offset)?
17309            {
17310                let member_inline_size = <fidl::encoding::Endpoint<
17311                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
17312                > as fidl::encoding::TypeMarker>::inline_size(
17313                    decoder.context
17314                );
17315                if inlined != (member_inline_size <= 4) {
17316                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
17317                }
17318                let inner_offset;
17319                let mut inner_depth = depth.clone();
17320                if inlined {
17321                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
17322                    inner_offset = next_offset;
17323                } else {
17324                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
17325                    inner_depth.increment()?;
17326                }
17327                let val_ref = self.token.get_or_insert_with(|| {
17328                    fidl::new_empty!(
17329                        fidl::encoding::Endpoint<
17330                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
17331                        >,
17332                        fidl::encoding::DefaultFuchsiaResourceDialect
17333                    )
17334                });
17335                fidl::decode!(
17336                    fidl::encoding::Endpoint<
17337                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
17338                    >,
17339                    fidl::encoding::DefaultFuchsiaResourceDialect,
17340                    val_ref,
17341                    decoder,
17342                    inner_offset,
17343                    inner_depth
17344                )?;
17345                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
17346                {
17347                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
17348                }
17349                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
17350                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
17351                }
17352            }
17353
17354            next_offset += envelope_size;
17355            _next_ordinal_to_read += 1;
17356            if next_offset >= end_offset {
17357                return Ok(());
17358            }
17359
17360            // Decode unknown envelopes for gaps in ordinals.
17361            while _next_ordinal_to_read < 2 {
17362                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
17363                _next_ordinal_to_read += 1;
17364                next_offset += envelope_size;
17365            }
17366
17367            let next_out_of_line = decoder.next_out_of_line();
17368            let handles_before = decoder.remaining_handles();
17369            if let Some((inlined, num_bytes, num_handles)) =
17370                fidl::encoding::decode_envelope_header(decoder, next_offset)?
17371            {
17372                let member_inline_size = <fidl::encoding::Endpoint<
17373                    fidl::endpoints::ServerEnd<BufferCollectionMarker>,
17374                > as fidl::encoding::TypeMarker>::inline_size(
17375                    decoder.context
17376                );
17377                if inlined != (member_inline_size <= 4) {
17378                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
17379                }
17380                let inner_offset;
17381                let mut inner_depth = depth.clone();
17382                if inlined {
17383                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
17384                    inner_offset = next_offset;
17385                } else {
17386                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
17387                    inner_depth.increment()?;
17388                }
17389                let val_ref = self.buffer_collection_request.get_or_insert_with(|| {
17390                    fidl::new_empty!(
17391                        fidl::encoding::Endpoint<
17392                            fidl::endpoints::ServerEnd<BufferCollectionMarker>,
17393                        >,
17394                        fidl::encoding::DefaultFuchsiaResourceDialect
17395                    )
17396                });
17397                fidl::decode!(
17398                    fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
17399                    fidl::encoding::DefaultFuchsiaResourceDialect,
17400                    val_ref,
17401                    decoder,
17402                    inner_offset,
17403                    inner_depth
17404                )?;
17405                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
17406                {
17407                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
17408                }
17409                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
17410                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
17411                }
17412            }
17413
17414            next_offset += envelope_size;
17415
17416            // Decode the remaining unknown envelopes.
17417            while next_offset < end_offset {
17418                _next_ordinal_to_read += 1;
17419                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
17420                next_offset += envelope_size;
17421            }
17422
17423            Ok(())
17424        }
17425    }
17426
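    // Editorial sketch (not part of the generated bindings): because this request type
    // derives `Default` and keeps `__source_breaking` public, callers are expected to
    // build it with struct-update syntax so that table members added at later API levels
    // do not break compilation. The helper below is hypothetical; the endpoints would
    // normally come from `fidl::endpoints::create_endpoints` or an equivalent.
    #[allow(dead_code)]
    fn example_bind_shared_collection_request(
        token: fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
        buffer_collection_request: fidl::endpoints::ServerEnd<BufferCollectionMarker>,
    ) -> AllocatorBindSharedCollectionRequest {
        AllocatorBindSharedCollectionRequest {
            token: Some(token),
            buffer_collection_request: Some(buffer_collection_request),
            // Leave any members added in future API levels unset.
            ..Default::default()
        }
    }
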
17427    impl AllocatorGetVmoInfoRequest {
17428        #[inline(always)]
17429        fn max_ordinal_present(&self) -> u64 {
17430            if let Some(_) = self.vmo {
17431                return 1;
17432            }
17433            0
17434        }
17435    }
17436
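    // Editorial note (not emitted by fidlgen): `max_ordinal_present` returns the highest
    // ordinal whose member is set, checking from the top down. The encoder uses it to
    // size the envelope array, so a request with only `vmo` (ordinal 1) set serializes a
    // single envelope, and an entirely empty table serializes no out-of-line data at all.
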
17437    impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoRequest {
17438        type Borrowed<'a> = &'a mut Self;
17439        fn take_or_borrow<'a>(
17440            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17441        ) -> Self::Borrowed<'a> {
17442            value
17443        }
17444    }
17445
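    // Editorial note (not emitted by fidlgen): AllocatorGetVmoInfoRequest is a resource
    // type, so `Borrowed<'a>` is `&'a mut Self` rather than `&'a Self`. Encoding goes
    // through `take_or_borrow`, which moves handles (here the VMO) out of the value and
    // into the message; after a successful encode the original struct no longer owns them.
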
17446    unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoRequest {
17447        type Owned = Self;
17448
17449        #[inline(always)]
17450        fn inline_align(_context: fidl::encoding::Context) -> usize {
17451            8
17452        }
17453
17454        #[inline(always)]
17455        fn inline_size(_context: fidl::encoding::Context) -> usize {
17456            16
17457        }
17458    }
17459
17460    unsafe impl
17461        fidl::encoding::Encode<
17462            AllocatorGetVmoInfoRequest,
17463            fidl::encoding::DefaultFuchsiaResourceDialect,
17464        > for &mut AllocatorGetVmoInfoRequest
17465    {
17466        unsafe fn encode(
17467            self,
17468            encoder: &mut fidl::encoding::Encoder<
17469                '_,
17470                fidl::encoding::DefaultFuchsiaResourceDialect,
17471            >,
17472            offset: usize,
17473            mut depth: fidl::encoding::Depth,
17474        ) -> fidl::Result<()> {
17475            encoder.debug_check_bounds::<AllocatorGetVmoInfoRequest>(offset);
17476            // Vector header
17477            let max_ordinal: u64 = self.max_ordinal_present();
17478            encoder.write_num(max_ordinal, offset);
17479            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
17480            // Calling encoder.out_of_line_offset(0) is not allowed.
17481            if max_ordinal == 0 {
17482                return Ok(());
17483            }
17484            depth.increment()?;
17485            let envelope_size = 8;
17486            let bytes_len = max_ordinal as usize * envelope_size;
17487            #[allow(unused_variables)]
17488            let offset = encoder.out_of_line_offset(bytes_len);
17489            let mut _prev_end_offset: usize = 0;
17490            if 1 > max_ordinal {
17491                return Ok(());
17492            }
17493
17494            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
17495            // are envelope_size bytes.
17496            let cur_offset: usize = (1 - 1) * envelope_size;
17497
17498            // Zero reserved fields.
17499            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
17500
17501            // Safety:
17502            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
17503            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
17504            //   envelope_size bytes, there is always sufficient room.
17505            fidl::encoding::encode_in_envelope_optional::<
17506                fidl::encoding::HandleType<
17507                    fidl::Vmo,
17508                    { fidl::ObjectType::VMO.into_raw() },
17509                    2147483648,
17510                >,
17511                fidl::encoding::DefaultFuchsiaResourceDialect,
17512            >(
17513                self.vmo.as_mut().map(
17514                    <fidl::encoding::HandleType<
17515                        fidl::Vmo,
17516                        { fidl::ObjectType::VMO.into_raw() },
17517                        2147483648,
17518                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
17519                ),
17520                encoder,
17521                offset + cur_offset,
17522                depth,
17523            )?;
17524
17525            _prev_end_offset = cur_offset + envelope_size;
17526
17527            Ok(())
17528        }
17529    }
17530
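    // Editorial note (not emitted by fidlgen): the `vmo` member is encoded as
    // `HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>`. The
    // second const parameter pins the expected kernel object type, and the third is the
    // rights mask taken from the .fidl declaration; 2147483648 is 0x8000_0000, which
    // appears to correspond to ZX_RIGHT_SAME_RIGHTS, i.e. no explicit rights constraint
    // was written on the handle in the FIDL source.
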
17531    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
17532        for AllocatorGetVmoInfoRequest
17533    {
17534        #[inline(always)]
17535        fn new_empty() -> Self {
17536            Self::default()
17537        }
17538
17539        unsafe fn decode(
17540            &mut self,
17541            decoder: &mut fidl::encoding::Decoder<
17542                '_,
17543                fidl::encoding::DefaultFuchsiaResourceDialect,
17544            >,
17545            offset: usize,
17546            mut depth: fidl::encoding::Depth,
17547        ) -> fidl::Result<()> {
17548            decoder.debug_check_bounds::<Self>(offset);
17549            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
17550                None => return Err(fidl::Error::NotNullable),
17551                Some(len) => len,
17552            };
17553            // Calling decoder.out_of_line_offset(0) is not allowed.
17554            if len == 0 {
17555                return Ok(());
17556            };
17557            depth.increment()?;
17558            let envelope_size = 8;
17559            let bytes_len = len * envelope_size;
17560            let offset = decoder.out_of_line_offset(bytes_len)?;
17561            // Decode the envelope for each type.
17562            let mut _next_ordinal_to_read = 0;
17563            let mut next_offset = offset;
17564            let end_offset = offset + bytes_len;
17565            _next_ordinal_to_read += 1;
17566            if next_offset >= end_offset {
17567                return Ok(());
17568            }
17569
17570            // Decode unknown envelopes for gaps in ordinals.
17571            while _next_ordinal_to_read < 1 {
17572                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
17573                _next_ordinal_to_read += 1;
17574                next_offset += envelope_size;
17575            }
17576
17577            let next_out_of_line = decoder.next_out_of_line();
17578            let handles_before = decoder.remaining_handles();
17579            if let Some((inlined, num_bytes, num_handles)) =
17580                fidl::encoding::decode_envelope_header(decoder, next_offset)?
17581            {
17582                let member_inline_size = <fidl::encoding::HandleType<
17583                    fidl::Vmo,
17584                    { fidl::ObjectType::VMO.into_raw() },
17585                    2147483648,
17586                > as fidl::encoding::TypeMarker>::inline_size(
17587                    decoder.context
17588                );
17589                if inlined != (member_inline_size <= 4) {
17590                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
17591                }
17592                let inner_offset;
17593                let mut inner_depth = depth.clone();
17594                if inlined {
17595                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
17596                    inner_offset = next_offset;
17597                } else {
17598                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
17599                    inner_depth.increment()?;
17600                }
17601                let val_ref =
17602                self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
17603                fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
17604                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
17605                {
17606                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
17607                }
17608                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
17609                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
17610                }
17611            }
17612
17613            next_offset += envelope_size;
17614
17615            // Decode the remaining unknown envelopes.
17616            while next_offset < end_offset {
17617                _next_ordinal_to_read += 1;
17618                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
17619                next_offset += envelope_size;
17620            }
17621
17622            Ok(())
17623        }
17624    }
17625
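    // Editorial note (not emitted by fidlgen): the decode path above tolerates schema
    // evolution. Envelopes for ordinals this binding does not know are consumed by
    // `decode_unknown_envelope` (discarding their contents, including any handles), and
    // the `inlined` check enforces the wire-format rule that payloads of 4 bytes or fewer
    // live inside the envelope while larger payloads go out of line. A hypothetical
    // server handler would simply take the decoded handle out of the table:
    #[allow(dead_code)]
    fn example_take_vmo(request: &mut AllocatorGetVmoInfoRequest) -> Option<fidl::Vmo> {
        // `vmo` is an Option because every table member is optional on the wire.
        request.vmo.take()
    }
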
17626    impl AllocatorGetVmoInfoResponse {
17627        #[inline(always)]
17628        fn max_ordinal_present(&self) -> u64 {
17629            if let Some(_) = self.close_weak_asap {
17630                return 3;
17631            }
17632            if let Some(_) = self.buffer_index {
17633                return 2;
17634            }
17635            if let Some(_) = self.buffer_collection_id {
17636                return 1;
17637            }
17638            0
17639        }
17640    }
17641
17642    impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoResponse {
17643        type Borrowed<'a> = &'a mut Self;
17644        fn take_or_borrow<'a>(
17645            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17646        ) -> Self::Borrowed<'a> {
17647            value
17648        }
17649    }
17650
17651    unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoResponse {
17652        type Owned = Self;
17653
17654        #[inline(always)]
17655        fn inline_align(_context: fidl::encoding::Context) -> usize {
17656            8
17657        }
17658
17659        #[inline(always)]
17660        fn inline_size(_context: fidl::encoding::Context) -> usize {
17661            16
17662        }
17663    }
17664
17665    unsafe impl
17666        fidl::encoding::Encode<
17667            AllocatorGetVmoInfoResponse,
17668            fidl::encoding::DefaultFuchsiaResourceDialect,
17669        > for &mut AllocatorGetVmoInfoResponse
17670    {
17671        unsafe fn encode(
17672            self,
17673            encoder: &mut fidl::encoding::Encoder<
17674                '_,
17675                fidl::encoding::DefaultFuchsiaResourceDialect,
17676            >,
17677            offset: usize,
17678            mut depth: fidl::encoding::Depth,
17679        ) -> fidl::Result<()> {
17680            encoder.debug_check_bounds::<AllocatorGetVmoInfoResponse>(offset);
17681            // Vector header
17682            let max_ordinal: u64 = self.max_ordinal_present();
17683            encoder.write_num(max_ordinal, offset);
17684            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
17685            // Calling encoder.out_of_line_offset(0) is not allowed.
17686            if max_ordinal == 0 {
17687                return Ok(());
17688            }
17689            depth.increment()?;
17690            let envelope_size = 8;
17691            let bytes_len = max_ordinal as usize * envelope_size;
17692            #[allow(unused_variables)]
17693            let offset = encoder.out_of_line_offset(bytes_len);
17694            let mut _prev_end_offset: usize = 0;
17695            if 1 > max_ordinal {
17696                return Ok(());
17697            }
17698
17699            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
17700            // are envelope_size bytes.
17701            let cur_offset: usize = (1 - 1) * envelope_size;
17702
17703            // Zero reserved fields.
17704            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
17705
17706            // Safety:
17707            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
17708            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
17709            //   envelope_size bytes, there is always sufficient room.
17710            fidl::encoding::encode_in_envelope_optional::<
17711                u64,
17712                fidl::encoding::DefaultFuchsiaResourceDialect,
17713            >(
17714                self.buffer_collection_id
17715                    .as_ref()
17716                    .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
17717                encoder,
17718                offset + cur_offset,
17719                depth,
17720            )?;
17721
17722            _prev_end_offset = cur_offset + envelope_size;
17723            if 2 > max_ordinal {
17724                return Ok(());
17725            }
17726
17727            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
17728            // are envelope_size bytes.
17729            let cur_offset: usize = (2 - 1) * envelope_size;
17730
17731            // Zero reserved fields.
17732            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
17733
17734            // Safety:
17735            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
17736            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
17737            //   envelope_size bytes, there is always sufficient room.
17738            fidl::encoding::encode_in_envelope_optional::<
17739                u64,
17740                fidl::encoding::DefaultFuchsiaResourceDialect,
17741            >(
17742                self.buffer_index.as_ref().map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
17743                encoder,
17744                offset + cur_offset,
17745                depth,
17746            )?;
17747
17748            _prev_end_offset = cur_offset + envelope_size;
17749            if 3 > max_ordinal {
17750                return Ok(());
17751            }
17752
17753            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
17754            // are envelope_size bytes.
17755            let cur_offset: usize = (3 - 1) * envelope_size;
17756
17757            // Zero reserved fields.
17758            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
17759
17760            // Safety:
17761            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
17762            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
17763            //   envelope_size bytes, there is always sufficient room.
17764            fidl::encoding::encode_in_envelope_optional::<
17765                fidl::encoding::HandleType<
17766                    fidl::EventPair,
17767                    { fidl::ObjectType::EVENTPAIR.into_raw() },
17768                    2147483648,
17769                >,
17770                fidl::encoding::DefaultFuchsiaResourceDialect,
17771            >(
17772                self.close_weak_asap.as_mut().map(
17773                    <fidl::encoding::HandleType<
17774                        fidl::EventPair,
17775                        { fidl::ObjectType::EVENTPAIR.into_raw() },
17776                        2147483648,
17777                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
17778                ),
17779                encoder,
17780                offset + cur_offset,
17781                depth,
17782            )?;
17783
17784            _prev_end_offset = cur_offset + envelope_size;
17785
17786            Ok(())
17787        }
17788    }
17789
17790    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
17791        for AllocatorGetVmoInfoResponse
17792    {
17793        #[inline(always)]
17794        fn new_empty() -> Self {
17795            Self::default()
17796        }
17797
17798        unsafe fn decode(
17799            &mut self,
17800            decoder: &mut fidl::encoding::Decoder<
17801                '_,
17802                fidl::encoding::DefaultFuchsiaResourceDialect,
17803            >,
17804            offset: usize,
17805            mut depth: fidl::encoding::Depth,
17806        ) -> fidl::Result<()> {
17807            decoder.debug_check_bounds::<Self>(offset);
17808            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
17809                None => return Err(fidl::Error::NotNullable),
17810                Some(len) => len,
17811            };
17812            // Calling decoder.out_of_line_offset(0) is not allowed.
17813            if len == 0 {
17814                return Ok(());
17815            };
17816            depth.increment()?;
17817            let envelope_size = 8;
17818            let bytes_len = len * envelope_size;
17819            let offset = decoder.out_of_line_offset(bytes_len)?;
17820            // Decode the envelope for each type.
17821            let mut _next_ordinal_to_read = 0;
17822            let mut next_offset = offset;
17823            let end_offset = offset + bytes_len;
17824            _next_ordinal_to_read += 1;
17825            if next_offset >= end_offset {
17826                return Ok(());
17827            }
17828
17829            // Decode unknown envelopes for gaps in ordinals.
17830            while _next_ordinal_to_read < 1 {
17831                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
17832                _next_ordinal_to_read += 1;
17833                next_offset += envelope_size;
17834            }
17835
17836            let next_out_of_line = decoder.next_out_of_line();
17837            let handles_before = decoder.remaining_handles();
17838            if let Some((inlined, num_bytes, num_handles)) =
17839                fidl::encoding::decode_envelope_header(decoder, next_offset)?
17840            {
17841                let member_inline_size =
17842                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
17843                if inlined != (member_inline_size <= 4) {
17844                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
17845                }
17846                let inner_offset;
17847                let mut inner_depth = depth.clone();
17848                if inlined {
17849                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
17850                    inner_offset = next_offset;
17851                } else {
17852                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
17853                    inner_depth.increment()?;
17854                }
17855                let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
17856                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
17857                });
17858                fidl::decode!(
17859                    u64,
17860                    fidl::encoding::DefaultFuchsiaResourceDialect,
17861                    val_ref,
17862                    decoder,
17863                    inner_offset,
17864                    inner_depth
17865                )?;
17866                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
17867                {
17868                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
17869                }
17870                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
17871                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
17872                }
17873            }
17874
17875            next_offset += envelope_size;
17876            _next_ordinal_to_read += 1;
17877            if next_offset >= end_offset {
17878                return Ok(());
17879            }
17880
17881            // Decode unknown envelopes for gaps in ordinals.
17882            while _next_ordinal_to_read < 2 {
17883                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
17884                _next_ordinal_to_read += 1;
17885                next_offset += envelope_size;
17886            }
17887
17888            let next_out_of_line = decoder.next_out_of_line();
17889            let handles_before = decoder.remaining_handles();
17890            if let Some((inlined, num_bytes, num_handles)) =
17891                fidl::encoding::decode_envelope_header(decoder, next_offset)?
17892            {
17893                let member_inline_size =
17894                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
17895                if inlined != (member_inline_size <= 4) {
17896                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
17897                }
17898                let inner_offset;
17899                let mut inner_depth = depth.clone();
17900                if inlined {
17901                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
17902                    inner_offset = next_offset;
17903                } else {
17904                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
17905                    inner_depth.increment()?;
17906                }
17907                let val_ref = self.buffer_index.get_or_insert_with(|| {
17908                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
17909                });
17910                fidl::decode!(
17911                    u64,
17912                    fidl::encoding::DefaultFuchsiaResourceDialect,
17913                    val_ref,
17914                    decoder,
17915                    inner_offset,
17916                    inner_depth
17917                )?;
17918                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
17919                {
17920                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
17921                }
17922                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
17923                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
17924                }
17925            }
17926
17927            next_offset += envelope_size;
17928            _next_ordinal_to_read += 1;
17929            if next_offset >= end_offset {
17930                return Ok(());
17931            }
17932
17933            // Decode unknown envelopes for gaps in ordinals.
17934            while _next_ordinal_to_read < 3 {
17935                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
17936                _next_ordinal_to_read += 1;
17937                next_offset += envelope_size;
17938            }
17939
17940            let next_out_of_line = decoder.next_out_of_line();
17941            let handles_before = decoder.remaining_handles();
17942            if let Some((inlined, num_bytes, num_handles)) =
17943                fidl::encoding::decode_envelope_header(decoder, next_offset)?
17944            {
17945                let member_inline_size = <fidl::encoding::HandleType<
17946                    fidl::EventPair,
17947                    { fidl::ObjectType::EVENTPAIR.into_raw() },
17948                    2147483648,
17949                > as fidl::encoding::TypeMarker>::inline_size(
17950                    decoder.context
17951                );
17952                if inlined != (member_inline_size <= 4) {
17953                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
17954                }
17955                let inner_offset;
17956                let mut inner_depth = depth.clone();
17957                if inlined {
17958                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
17959                    inner_offset = next_offset;
17960                } else {
17961                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
17962                    inner_depth.increment()?;
17963                }
17964                let val_ref =
17965                self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
17966                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
17967                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
17968                {
17969                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
17970                }
17971                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
17972                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
17973                }
17974            }
17975
17976            next_offset += envelope_size;
17977
17978            // Decode the remaining unknown envelopes.
17979            while next_offset < end_offset {
17980                _next_ordinal_to_read += 1;
17981                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
17982                next_offset += envelope_size;
17983            }
17984
17985            Ok(())
17986        }
17987    }
17988
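    // Editorial sketch (not part of the generated bindings): a server answering
    // Allocator.GetVmoInfo would populate the response table member by member. The
    // helper below is hypothetical; `close_weak_asap` is typically only present for
    // sysmem weak VMOs, which is why it is passed through as an Option here.
    #[allow(dead_code)]
    fn example_get_vmo_info_response(
        buffer_collection_id: u64,
        buffer_index: u64,
        close_weak_asap: Option<fidl::EventPair>,
    ) -> AllocatorGetVmoInfoResponse {
        AllocatorGetVmoInfoResponse {
            buffer_collection_id: Some(buffer_collection_id),
            buffer_index: Some(buffer_index),
            close_weak_asap,
            ..Default::default()
        }
    }
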
17989    impl BufferCollectionAttachLifetimeTrackingRequest {
17990        #[inline(always)]
17991        fn max_ordinal_present(&self) -> u64 {
17992            if let Some(_) = self.buffers_remaining {
17993                return 2;
17994            }
17995            if let Some(_) = self.server_end {
17996                return 1;
17997            }
17998            0
17999        }
18000    }
18001
18002    impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
18003        type Borrowed<'a> = &'a mut Self;
18004        fn take_or_borrow<'a>(
18005            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18006        ) -> Self::Borrowed<'a> {
18007            value
18008        }
18009    }
18010
18011    unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
18012        type Owned = Self;
18013
18014        #[inline(always)]
18015        fn inline_align(_context: fidl::encoding::Context) -> usize {
18016            8
18017        }
18018
18019        #[inline(always)]
18020        fn inline_size(_context: fidl::encoding::Context) -> usize {
18021            16
18022        }
18023    }
18024
18025    unsafe impl
18026        fidl::encoding::Encode<
18027            BufferCollectionAttachLifetimeTrackingRequest,
18028            fidl::encoding::DefaultFuchsiaResourceDialect,
18029        > for &mut BufferCollectionAttachLifetimeTrackingRequest
18030    {
18031        unsafe fn encode(
18032            self,
18033            encoder: &mut fidl::encoding::Encoder<
18034                '_,
18035                fidl::encoding::DefaultFuchsiaResourceDialect,
18036            >,
18037            offset: usize,
18038            mut depth: fidl::encoding::Depth,
18039        ) -> fidl::Result<()> {
18040            encoder.debug_check_bounds::<BufferCollectionAttachLifetimeTrackingRequest>(offset);
18041            // Vector header
18042            let max_ordinal: u64 = self.max_ordinal_present();
18043            encoder.write_num(max_ordinal, offset);
18044            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
18045            // Calling encoder.out_of_line_offset(0) is not allowed.
18046            if max_ordinal == 0 {
18047                return Ok(());
18048            }
18049            depth.increment()?;
18050            let envelope_size = 8;
18051            let bytes_len = max_ordinal as usize * envelope_size;
18052            #[allow(unused_variables)]
18053            let offset = encoder.out_of_line_offset(bytes_len);
18054            let mut _prev_end_offset: usize = 0;
18055            if 1 > max_ordinal {
18056                return Ok(());
18057            }
18058
18059            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
18060            // are envelope_size bytes.
18061            let cur_offset: usize = (1 - 1) * envelope_size;
18062
18063            // Zero reserved fields.
18064            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
18065
18066            // Safety:
18067            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
18068            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
18069            //   envelope_size bytes, there is always sufficient room.
18070            fidl::encoding::encode_in_envelope_optional::<
18071                fidl::encoding::HandleType<
18072                    fidl::EventPair,
18073                    { fidl::ObjectType::EVENTPAIR.into_raw() },
18074                    2147483648,
18075                >,
18076                fidl::encoding::DefaultFuchsiaResourceDialect,
18077            >(
18078                self.server_end.as_mut().map(
18079                    <fidl::encoding::HandleType<
18080                        fidl::EventPair,
18081                        { fidl::ObjectType::EVENTPAIR.into_raw() },
18082                        2147483648,
18083                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
18084                ),
18085                encoder,
18086                offset + cur_offset,
18087                depth,
18088            )?;
18089
18090            _prev_end_offset = cur_offset + envelope_size;
18091            if 2 > max_ordinal {
18092                return Ok(());
18093            }
18094
18095            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
18096            // are envelope_size bytes.
18097            let cur_offset: usize = (2 - 1) * envelope_size;
18098
18099            // Zero reserved fields.
18100            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
18101
18102            // Safety:
18103            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
18104            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
18105            //   envelope_size bytes, there is always sufficient room.
18106            fidl::encoding::encode_in_envelope_optional::<
18107                u32,
18108                fidl::encoding::DefaultFuchsiaResourceDialect,
18109            >(
18110                self.buffers_remaining
18111                    .as_ref()
18112                    .map(<u32 as fidl::encoding::ValueTypeMarker>::borrow),
18113                encoder,
18114                offset + cur_offset,
18115                depth,
18116            )?;
18117
18118            _prev_end_offset = cur_offset + envelope_size;
18119
18120            Ok(())
18121        }
18122    }
18123
18124    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
18125        for BufferCollectionAttachLifetimeTrackingRequest
18126    {
18127        #[inline(always)]
18128        fn new_empty() -> Self {
18129            Self::default()
18130        }
18131
18132        unsafe fn decode(
18133            &mut self,
18134            decoder: &mut fidl::encoding::Decoder<
18135                '_,
18136                fidl::encoding::DefaultFuchsiaResourceDialect,
18137            >,
18138            offset: usize,
18139            mut depth: fidl::encoding::Depth,
18140        ) -> fidl::Result<()> {
18141            decoder.debug_check_bounds::<Self>(offset);
18142            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
18143                None => return Err(fidl::Error::NotNullable),
18144                Some(len) => len,
18145            };
18146            // Calling decoder.out_of_line_offset(0) is not allowed.
18147            if len == 0 {
18148                return Ok(());
18149            };
18150            depth.increment()?;
18151            let envelope_size = 8;
18152            let bytes_len = len * envelope_size;
18153            let offset = decoder.out_of_line_offset(bytes_len)?;
18154            // Decode the envelope for each type.
18155            let mut _next_ordinal_to_read = 0;
18156            let mut next_offset = offset;
18157            let end_offset = offset + bytes_len;
18158            _next_ordinal_to_read += 1;
18159            if next_offset >= end_offset {
18160                return Ok(());
18161            }
18162
18163            // Decode unknown envelopes for gaps in ordinals.
18164            while _next_ordinal_to_read < 1 {
18165                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
18166                _next_ordinal_to_read += 1;
18167                next_offset += envelope_size;
18168            }
18169
18170            let next_out_of_line = decoder.next_out_of_line();
18171            let handles_before = decoder.remaining_handles();
18172            if let Some((inlined, num_bytes, num_handles)) =
18173                fidl::encoding::decode_envelope_header(decoder, next_offset)?
18174            {
18175                let member_inline_size = <fidl::encoding::HandleType<
18176                    fidl::EventPair,
18177                    { fidl::ObjectType::EVENTPAIR.into_raw() },
18178                    2147483648,
18179                > as fidl::encoding::TypeMarker>::inline_size(
18180                    decoder.context
18181                );
18182                if inlined != (member_inline_size <= 4) {
18183                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
18184                }
18185                let inner_offset;
18186                let mut inner_depth = depth.clone();
18187                if inlined {
18188                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
18189                    inner_offset = next_offset;
18190                } else {
18191                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
18192                    inner_depth.increment()?;
18193                }
18194                let val_ref =
18195                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
18196                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
18197                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
18198                {
18199                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
18200                }
18201                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
18202                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
18203                }
18204            }
18205
18206            next_offset += envelope_size;
18207            _next_ordinal_to_read += 1;
18208            if next_offset >= end_offset {
18209                return Ok(());
18210            }
18211
18212            // Decode unknown envelopes for gaps in ordinals.
18213            while _next_ordinal_to_read < 2 {
18214                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
18215                _next_ordinal_to_read += 1;
18216                next_offset += envelope_size;
18217            }
18218
18219            let next_out_of_line = decoder.next_out_of_line();
18220            let handles_before = decoder.remaining_handles();
18221            if let Some((inlined, num_bytes, num_handles)) =
18222                fidl::encoding::decode_envelope_header(decoder, next_offset)?
18223            {
18224                let member_inline_size =
18225                    <u32 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
18226                if inlined != (member_inline_size <= 4) {
18227                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
18228                }
18229                let inner_offset;
18230                let mut inner_depth = depth.clone();
18231                if inlined {
18232                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
18233                    inner_offset = next_offset;
18234                } else {
18235                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
18236                    inner_depth.increment()?;
18237                }
18238                let val_ref = self.buffers_remaining.get_or_insert_with(|| {
18239                    fidl::new_empty!(u32, fidl::encoding::DefaultFuchsiaResourceDialect)
18240                });
18241                fidl::decode!(
18242                    u32,
18243                    fidl::encoding::DefaultFuchsiaResourceDialect,
18244                    val_ref,
18245                    decoder,
18246                    inner_offset,
18247                    inner_depth
18248                )?;
18249                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
18250                {
18251                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
18252                }
18253                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
18254                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
18255                }
18256            }
18257
18258            next_offset += envelope_size;
18259
18260            // Decode the remaining unknown envelopes.
18261            while next_offset < end_offset {
18262                _next_ordinal_to_read += 1;
18263                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
18264                next_offset += envelope_size;
18265            }
18266
18267            Ok(())
18268        }
18269    }
18270
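    // Editorial note (not emitted by fidlgen): every member decode above is bracketed by
    // bookkeeping checks. `next_out_of_line` and `remaining_handles` are sampled before
    // the envelope is read and compared against the header's `num_bytes`/`num_handles`
    // afterwards, so a message whose envelope header misstates its payload is rejected
    // with InvalidNumBytesInEnvelope or InvalidNumHandlesInEnvelope rather than being
    // silently misinterpreted. `depth.increment()` likewise enforces the wire format's
    // nesting limit before each out-of-line reference is followed.
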
18271    impl BufferCollectionAttachTokenRequest {
18272        #[inline(always)]
18273        fn max_ordinal_present(&self) -> u64 {
18274            if let Some(_) = self.token_request {
18275                return 2;
18276            }
18277            if let Some(_) = self.rights_attenuation_mask {
18278                return 1;
18279            }
18280            0
18281        }
18282    }
18283
18284    impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachTokenRequest {
18285        type Borrowed<'a> = &'a mut Self;
18286        fn take_or_borrow<'a>(
18287            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18288        ) -> Self::Borrowed<'a> {
18289            value
18290        }
18291    }
18292
18293    unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachTokenRequest {
18294        type Owned = Self;
18295
18296        #[inline(always)]
18297        fn inline_align(_context: fidl::encoding::Context) -> usize {
18298            8
18299        }
18300
18301        #[inline(always)]
18302        fn inline_size(_context: fidl::encoding::Context) -> usize {
18303            16
18304        }
18305    }
18306
18307    unsafe impl
18308        fidl::encoding::Encode<
18309            BufferCollectionAttachTokenRequest,
18310            fidl::encoding::DefaultFuchsiaResourceDialect,
18311        > for &mut BufferCollectionAttachTokenRequest
18312    {
18313        unsafe fn encode(
18314            self,
18315            encoder: &mut fidl::encoding::Encoder<
18316                '_,
18317                fidl::encoding::DefaultFuchsiaResourceDialect,
18318            >,
18319            offset: usize,
18320            mut depth: fidl::encoding::Depth,
18321        ) -> fidl::Result<()> {
18322            encoder.debug_check_bounds::<BufferCollectionAttachTokenRequest>(offset);
18323            // Vector header
18324            let max_ordinal: u64 = self.max_ordinal_present();
18325            encoder.write_num(max_ordinal, offset);
18326            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
18327            // Calling encoder.out_of_line_offset(0) is not allowed.
18328            if max_ordinal == 0 {
18329                return Ok(());
18330            }
18331            depth.increment()?;
18332            let envelope_size = 8;
18333            let bytes_len = max_ordinal as usize * envelope_size;
18334            #[allow(unused_variables)]
18335            let offset = encoder.out_of_line_offset(bytes_len);
18336            let mut _prev_end_offset: usize = 0;
18337            if 1 > max_ordinal {
18338                return Ok(());
18339            }
18340
18341            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
18342            // are envelope_size bytes.
18343            let cur_offset: usize = (1 - 1) * envelope_size;
18344
18345            // Zero reserved fields.
18346            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
18347
18348            // Safety:
18349            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
18350            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
18351            //   envelope_size bytes, there is always sufficient room.
18352            fidl::encoding::encode_in_envelope_optional::<
18353                fidl::Rights,
18354                fidl::encoding::DefaultFuchsiaResourceDialect,
18355            >(
18356                self.rights_attenuation_mask
18357                    .as_ref()
18358                    .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
18359                encoder,
18360                offset + cur_offset,
18361                depth,
18362            )?;
18363
18364            _prev_end_offset = cur_offset + envelope_size;
18365            if 2 > max_ordinal {
18366                return Ok(());
18367            }
18368
18369            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
18370            // are envelope_size bytes.
18371            let cur_offset: usize = (2 - 1) * envelope_size;
18372
18373            // Zero reserved fields.
18374            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
18375
18376            // Safety:
18377            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
18378            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
18379            //   envelope_size bytes, there is always sufficient room.
18380            fidl::encoding::encode_in_envelope_optional::<
18381                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
18382                fidl::encoding::DefaultFuchsiaResourceDialect,
18383            >(
18384                self.token_request.as_mut().map(
18385                    <fidl::encoding::Endpoint<
18386                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
18387                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
18388                ),
18389                encoder,
18390                offset + cur_offset,
18391                depth,
18392            )?;
18393
18394            _prev_end_offset = cur_offset + envelope_size;
18395
18396            Ok(())
18397        }
18398    }
18399
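    // Editorial note (not emitted by fidlgen): this table mixes a plain value member with
    // a resource member, which is visible in the encode path above:
    // `rights_attenuation_mask` is borrowed via ValueTypeMarker::borrow, while
    // `token_request` is moved out via ResourceTypeMarker::take_or_borrow. A hypothetical
    // construction helper, with both inputs supplied by the caller:
    #[allow(dead_code)]
    fn example_attach_token_request(
        rights_attenuation_mask: fidl::Rights,
        token_request: fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
    ) -> BufferCollectionAttachTokenRequest {
        BufferCollectionAttachTokenRequest {
            rights_attenuation_mask: Some(rights_attenuation_mask),
            token_request: Some(token_request),
            ..Default::default()
        }
    }
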
18400    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
18401        for BufferCollectionAttachTokenRequest
18402    {
18403        #[inline(always)]
18404        fn new_empty() -> Self {
18405            Self::default()
18406        }
18407
18408        unsafe fn decode(
18409            &mut self,
18410            decoder: &mut fidl::encoding::Decoder<
18411                '_,
18412                fidl::encoding::DefaultFuchsiaResourceDialect,
18413            >,
18414            offset: usize,
18415            mut depth: fidl::encoding::Depth,
18416        ) -> fidl::Result<()> {
18417            decoder.debug_check_bounds::<Self>(offset);
18418            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
18419                None => return Err(fidl::Error::NotNullable),
18420                Some(len) => len,
18421            };
18422            // Calling decoder.out_of_line_offset(0) is not allowed.
18423            if len == 0 {
18424                return Ok(());
18425            };
18426            depth.increment()?;
18427            let envelope_size = 8;
18428            let bytes_len = len * envelope_size;
18429            let offset = decoder.out_of_line_offset(bytes_len)?;
18430            // Decode the envelope for each type.
18431            let mut _next_ordinal_to_read = 0;
18432            let mut next_offset = offset;
18433            let end_offset = offset + bytes_len;
18434            _next_ordinal_to_read += 1;
18435            if next_offset >= end_offset {
18436                return Ok(());
18437            }
18438
18439            // Decode unknown envelopes for gaps in ordinals.
18440            while _next_ordinal_to_read < 1 {
18441                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
18442                _next_ordinal_to_read += 1;
18443                next_offset += envelope_size;
18444            }
18445
18446            let next_out_of_line = decoder.next_out_of_line();
18447            let handles_before = decoder.remaining_handles();
18448            if let Some((inlined, num_bytes, num_handles)) =
18449                fidl::encoding::decode_envelope_header(decoder, next_offset)?
18450            {
18451                let member_inline_size =
18452                    <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
18453                if inlined != (member_inline_size <= 4) {
18454                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
18455                }
18456                let inner_offset;
18457                let mut inner_depth = depth.clone();
18458                if inlined {
18459                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
18460                    inner_offset = next_offset;
18461                } else {
18462                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
18463                    inner_depth.increment()?;
18464                }
18465                let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
18466                    fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
18467                });
18468                fidl::decode!(
18469                    fidl::Rights,
18470                    fidl::encoding::DefaultFuchsiaResourceDialect,
18471                    val_ref,
18472                    decoder,
18473                    inner_offset,
18474                    inner_depth
18475                )?;
18476                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
18477                {
18478                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
18479                }
18480                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
18481                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
18482                }
18483            }
18484
18485            next_offset += envelope_size;
18486            _next_ordinal_to_read += 1;
18487            if next_offset >= end_offset {
18488                return Ok(());
18489            }
18490
18491            // Decode unknown envelopes for gaps in ordinals.
18492            while _next_ordinal_to_read < 2 {
18493                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
18494                _next_ordinal_to_read += 1;
18495                next_offset += envelope_size;
18496            }
18497
18498            let next_out_of_line = decoder.next_out_of_line();
18499            let handles_before = decoder.remaining_handles();
18500            if let Some((inlined, num_bytes, num_handles)) =
18501                fidl::encoding::decode_envelope_header(decoder, next_offset)?
18502            {
18503                let member_inline_size = <fidl::encoding::Endpoint<
18504                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
18505                > as fidl::encoding::TypeMarker>::inline_size(
18506                    decoder.context
18507                );
18508                if inlined != (member_inline_size <= 4) {
18509                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
18510                }
18511                let inner_offset;
18512                let mut inner_depth = depth.clone();
18513                if inlined {
18514                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
18515                    inner_offset = next_offset;
18516                } else {
18517                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
18518                    inner_depth.increment()?;
18519                }
18520                let val_ref = self.token_request.get_or_insert_with(|| {
18521                    fidl::new_empty!(
18522                        fidl::encoding::Endpoint<
18523                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
18524                        >,
18525                        fidl::encoding::DefaultFuchsiaResourceDialect
18526                    )
18527                });
18528                fidl::decode!(
18529                    fidl::encoding::Endpoint<
18530                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
18531                    >,
18532                    fidl::encoding::DefaultFuchsiaResourceDialect,
18533                    val_ref,
18534                    decoder,
18535                    inner_offset,
18536                    inner_depth
18537                )?;
18538                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
18539                {
18540                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
18541                }
18542                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
18543                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
18544                }
18545            }
18546
18547            next_offset += envelope_size;
18548
18549            // Decode the remaining unknown envelopes.
18550            while next_offset < end_offset {
18551                _next_ordinal_to_read += 1;
18552                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
18553                next_offset += envelope_size;
18554            }
18555
18556            Ok(())
18557        }
18558    }
18559
18560    impl BufferCollectionInfo {
18561        #[inline(always)]
18562        fn max_ordinal_present(&self) -> u64 {
18563            if let Some(_) = self.buffer_collection_id {
18564                return 3;
18565            }
18566            if let Some(_) = self.buffers {
18567                return 2;
18568            }
18569            if let Some(_) = self.settings {
18570                return 1;
18571            }
18572            0
18573        }
18574    }
18575
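    // NOTE (editorial sketch, not fidlgen output): the `max_ordinal_present` helper above
    // returns the highest ordinal whose field is `Some`, checking fields from the highest
    // ordinal down; the encoder then emits only that many envelopes, so trailing absent
    // fields cost nothing on the wire. A minimal self-contained sketch of that pattern
    // follows; `ToyTable` and its field names are illustrative only.
    #[cfg(test)]
    mod max_ordinal_present_example {
        // Toy table mirroring the shape of `BufferCollectionInfo` above.
        #[derive(Default)]
        struct ToyTable {
            settings: Option<u32>,             // ordinal 1
            buffers: Option<Vec<u8>>,          // ordinal 2
            buffer_collection_id: Option<u64>, // ordinal 3
        }

        // Same shape as the generated helper: check from the highest ordinal
        // down and return the first one that is set.
        fn max_ordinal_present(t: &ToyTable) -> u64 {
            if t.buffer_collection_id.is_some() {
                return 3;
            }
            if t.buffers.is_some() {
                return 2;
            }
            if t.settings.is_some() {
                return 1;
            }
            0
        }

        #[test]
        fn trailing_absent_fields_shrink_the_envelope_array() {
            let empty = ToyTable::default();
            assert_eq!(max_ordinal_present(&empty), 0);

            let only_first = ToyTable { settings: Some(7), ..Default::default() };
            assert_eq!(max_ordinal_present(&only_first), 1);

            // Absent ordinals 1 and 2 still get (empty) envelopes when a later
            // ordinal is present.
            let with_id = ToyTable { buffer_collection_id: Some(42), ..Default::default() };
            assert_eq!(max_ordinal_present(&with_id), 3);
        }
    }
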
18576    impl fidl::encoding::ResourceTypeMarker for BufferCollectionInfo {
18577        type Borrowed<'a> = &'a mut Self;
18578        fn take_or_borrow<'a>(
18579            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18580        ) -> Self::Borrowed<'a> {
18581            value
18582        }
18583    }
18584
18585    unsafe impl fidl::encoding::TypeMarker for BufferCollectionInfo {
18586        type Owned = Self;
18587
18588        #[inline(always)]
18589        fn inline_align(_context: fidl::encoding::Context) -> usize {
18590            8
18591        }
18592
18593        #[inline(always)]
18594        fn inline_size(_context: fidl::encoding::Context) -> usize {
18595            16
18596        }
18597    }
18598
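    // NOTE (editorial sketch, not fidlgen output): the `TypeMarker` impl above reports an
    // inline alignment of 8 and an inline size of 16 because a table's inline part is a
    // vector-style header: a u64 holding the highest present ordinal followed by a u64
    // presence marker, with the envelope array stored out of line. This mirrors the
    // encode path below (`write_num(max_ordinal, offset)` then
    // `write_num(ALLOC_PRESENT_U64, offset + 8)`). The local `ALLOC_PRESENT_U64` constant
    // here is an assumption (conventionally an all-ones u64); the real constant lives in
    // `fidl::encoding`.
    #[cfg(test)]
    mod table_header_example {
        const ALLOC_PRESENT_U64: u64 = u64::MAX;

        fn read_u64_le(bytes: &[u8]) -> u64 {
            let mut buf = [0u8; 8];
            buf.copy_from_slice(&bytes[..8]);
            u64::from_le_bytes(buf)
        }

        // Build the 16-byte inline part of a table: max ordinal, then presence.
        fn table_inline_header(max_ordinal: u64) -> [u8; 16] {
            let mut header = [0u8; 16];
            header[..8].copy_from_slice(&max_ordinal.to_le_bytes());
            header[8..].copy_from_slice(&ALLOC_PRESENT_U64.to_le_bytes());
            header
        }

        #[test]
        fn header_is_sixteen_bytes() {
            let header = table_inline_header(3);
            assert_eq!(header.len(), 16); // matches inline_size() above
            assert_eq!(read_u64_le(&header[..8]), 3);
            assert_eq!(read_u64_le(&header[8..]), u64::MAX);
        }
    }
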
18599    unsafe impl
18600        fidl::encoding::Encode<BufferCollectionInfo, fidl::encoding::DefaultFuchsiaResourceDialect>
18601        for &mut BufferCollectionInfo
18602    {
18603        unsafe fn encode(
18604            self,
18605            encoder: &mut fidl::encoding::Encoder<
18606                '_,
18607                fidl::encoding::DefaultFuchsiaResourceDialect,
18608            >,
18609            offset: usize,
18610            mut depth: fidl::encoding::Depth,
18611        ) -> fidl::Result<()> {
18612            encoder.debug_check_bounds::<BufferCollectionInfo>(offset);
18613            // Vector header
18614            let max_ordinal: u64 = self.max_ordinal_present();
18615            encoder.write_num(max_ordinal, offset);
18616            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
18617            // Calling encoder.out_of_line_offset(0) is not allowed.
18618            if max_ordinal == 0 {
18619                return Ok(());
18620            }
18621            depth.increment()?;
18622            let envelope_size = 8;
18623            let bytes_len = max_ordinal as usize * envelope_size;
18624            #[allow(unused_variables)]
18625            let offset = encoder.out_of_line_offset(bytes_len);
18626            let mut _prev_end_offset: usize = 0;
18627            if 1 > max_ordinal {
18628                return Ok(());
18629            }
18630
18631            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
18632            // are envelope_size bytes.
18633            let cur_offset: usize = (1 - 1) * envelope_size;
18634
18635            // Zero reserved fields.
18636            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
18637
18638            // Safety:
18639            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
18640            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
18641            //   envelope_size bytes, there is always sufficient room.
18642            fidl::encoding::encode_in_envelope_optional::<
18643                SingleBufferSettings,
18644                fidl::encoding::DefaultFuchsiaResourceDialect,
18645            >(
18646                self.settings
18647                    .as_ref()
18648                    .map(<SingleBufferSettings as fidl::encoding::ValueTypeMarker>::borrow),
18649                encoder,
18650                offset + cur_offset,
18651                depth,
18652            )?;
18653
18654            _prev_end_offset = cur_offset + envelope_size;
18655            if 2 > max_ordinal {
18656                return Ok(());
18657            }
18658
18659            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
18660            // are envelope_size bytes.
18661            let cur_offset: usize = (2 - 1) * envelope_size;
18662
18663            // Zero reserved fields.
18664            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
18665
18666            // Safety:
18667            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
18668            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
18669            //   envelope_size bytes, there is always sufficient room.
18670            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect>(
18671            self.buffers.as_mut().map(<fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
18672            encoder, offset + cur_offset, depth
18673        )?;
18674
18675            _prev_end_offset = cur_offset + envelope_size;
18676            if 3 > max_ordinal {
18677                return Ok(());
18678            }
18679
18680            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
18681            // are envelope_size bytes.
18682            let cur_offset: usize = (3 - 1) * envelope_size;
18683
18684            // Zero reserved fields.
18685            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
18686
18687            // Safety:
18688            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
18689            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
18690            //   envelope_size bytes, there is always sufficient room.
18691            fidl::encoding::encode_in_envelope_optional::<
18692                u64,
18693                fidl::encoding::DefaultFuchsiaResourceDialect,
18694            >(
18695                self.buffer_collection_id
18696                    .as_ref()
18697                    .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
18698                encoder,
18699                offset + cur_offset,
18700                depth,
18701            )?;
18702
18703            _prev_end_offset = cur_offset + envelope_size;
18704
18705            Ok(())
18706        }
18707    }
18708
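    // NOTE (editorial sketch, not fidlgen output): in the encode impl above each present
    // member is written into an 8-byte envelope slot at `(ordinal - 1) * envelope_size`
    // within an out-of-line array of `max_ordinal * envelope_size` bytes; the gap between
    // the previous member's slot and the current one is zero-padded. The sketch below
    // restates that arithmetic with plain functions (names are illustrative).
    #[cfg(test)]
    mod envelope_offset_example {
        const ENVELOPE_SIZE: usize = 8;

        // Offset of a member's envelope inside the table's envelope array,
        // mirroring `(ordinal - 1) * envelope_size` in the encode path above.
        fn envelope_offset(ordinal: u64) -> usize {
            (ordinal as usize - 1) * ENVELOPE_SIZE
        }

        // Total size reserved out of line for the envelope array.
        fn envelope_array_len(max_ordinal: u64) -> usize {
            max_ordinal as usize * ENVELOPE_SIZE
        }

        #[test]
        fn ordinals_map_to_consecutive_eight_byte_slots() {
            assert_eq!(envelope_offset(1), 0);
            assert_eq!(envelope_offset(2), 8);
            assert_eq!(envelope_offset(3), 16);
            // A table whose highest present ordinal is 3 reserves 24 bytes of
            // envelopes, even if ordinals 1 and 2 are absent (their envelopes
            // are zeroed).
            assert_eq!(envelope_array_len(3), 24);
            assert!(envelope_offset(3) + ENVELOPE_SIZE <= envelope_array_len(3));
        }
    }
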
18709    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
18710        for BufferCollectionInfo
18711    {
18712        #[inline(always)]
18713        fn new_empty() -> Self {
18714            Self::default()
18715        }
18716
18717        unsafe fn decode(
18718            &mut self,
18719            decoder: &mut fidl::encoding::Decoder<
18720                '_,
18721                fidl::encoding::DefaultFuchsiaResourceDialect,
18722            >,
18723            offset: usize,
18724            mut depth: fidl::encoding::Depth,
18725        ) -> fidl::Result<()> {
18726            decoder.debug_check_bounds::<Self>(offset);
18727            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
18728                None => return Err(fidl::Error::NotNullable),
18729                Some(len) => len,
18730            };
18731            // Calling decoder.out_of_line_offset(0) is not allowed.
18732            if len == 0 {
18733                return Ok(());
18734            };
18735            depth.increment()?;
18736            let envelope_size = 8;
18737            let bytes_len = len * envelope_size;
18738            let offset = decoder.out_of_line_offset(bytes_len)?;
18739            // Decode the envelope for each type.
18740            let mut _next_ordinal_to_read = 0;
18741            let mut next_offset = offset;
18742            let end_offset = offset + bytes_len;
18743            _next_ordinal_to_read += 1;
18744            if next_offset >= end_offset {
18745                return Ok(());
18746            }
18747
18748            // Decode unknown envelopes for gaps in ordinals.
18749            while _next_ordinal_to_read < 1 {
18750                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
18751                _next_ordinal_to_read += 1;
18752                next_offset += envelope_size;
18753            }
18754
18755            let next_out_of_line = decoder.next_out_of_line();
18756            let handles_before = decoder.remaining_handles();
18757            if let Some((inlined, num_bytes, num_handles)) =
18758                fidl::encoding::decode_envelope_header(decoder, next_offset)?
18759            {
18760                let member_inline_size =
18761                    <SingleBufferSettings as fidl::encoding::TypeMarker>::inline_size(
18762                        decoder.context,
18763                    );
18764                if inlined != (member_inline_size <= 4) {
18765                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
18766                }
18767                let inner_offset;
18768                let mut inner_depth = depth.clone();
18769                if inlined {
18770                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
18771                    inner_offset = next_offset;
18772                } else {
18773                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
18774                    inner_depth.increment()?;
18775                }
18776                let val_ref = self.settings.get_or_insert_with(|| {
18777                    fidl::new_empty!(
18778                        SingleBufferSettings,
18779                        fidl::encoding::DefaultFuchsiaResourceDialect
18780                    )
18781                });
18782                fidl::decode!(
18783                    SingleBufferSettings,
18784                    fidl::encoding::DefaultFuchsiaResourceDialect,
18785                    val_ref,
18786                    decoder,
18787                    inner_offset,
18788                    inner_depth
18789                )?;
18790                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
18791                {
18792                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
18793                }
18794                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
18795                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
18796                }
18797            }
18798
18799            next_offset += envelope_size;
18800            _next_ordinal_to_read += 1;
18801            if next_offset >= end_offset {
18802                return Ok(());
18803            }
18804
18805            // Decode unknown envelopes for gaps in ordinals.
18806            while _next_ordinal_to_read < 2 {
18807                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
18808                _next_ordinal_to_read += 1;
18809                next_offset += envelope_size;
18810            }
18811
18812            let next_out_of_line = decoder.next_out_of_line();
18813            let handles_before = decoder.remaining_handles();
18814            if let Some((inlined, num_bytes, num_handles)) =
18815                fidl::encoding::decode_envelope_header(decoder, next_offset)?
18816            {
18817                let member_inline_size = <fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::TypeMarker>::inline_size(decoder.context);
18818                if inlined != (member_inline_size <= 4) {
18819                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
18820                }
18821                let inner_offset;
18822                let mut inner_depth = depth.clone();
18823                if inlined {
18824                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
18825                    inner_offset = next_offset;
18826                } else {
18827                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
18828                    inner_depth.increment()?;
18829                }
18830                let val_ref =
18831                self.buffers.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect));
18832                fidl::decode!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
18833                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
18834                {
18835                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
18836                }
18837                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
18838                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
18839                }
18840            }
18841
18842            next_offset += envelope_size;
18843            _next_ordinal_to_read += 1;
18844            if next_offset >= end_offset {
18845                return Ok(());
18846            }
18847
18848            // Decode unknown envelopes for gaps in ordinals.
18849            while _next_ordinal_to_read < 3 {
18850                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
18851                _next_ordinal_to_read += 1;
18852                next_offset += envelope_size;
18853            }
18854
18855            let next_out_of_line = decoder.next_out_of_line();
18856            let handles_before = decoder.remaining_handles();
18857            if let Some((inlined, num_bytes, num_handles)) =
18858                fidl::encoding::decode_envelope_header(decoder, next_offset)?
18859            {
18860                let member_inline_size =
18861                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
18862                if inlined != (member_inline_size <= 4) {
18863                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
18864                }
18865                let inner_offset;
18866                let mut inner_depth = depth.clone();
18867                if inlined {
18868                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
18869                    inner_offset = next_offset;
18870                } else {
18871                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
18872                    inner_depth.increment()?;
18873                }
18874                let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
18875                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
18876                });
18877                fidl::decode!(
18878                    u64,
18879                    fidl::encoding::DefaultFuchsiaResourceDialect,
18880                    val_ref,
18881                    decoder,
18882                    inner_offset,
18883                    inner_depth
18884                )?;
18885                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
18886                {
18887                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
18888                }
18889                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
18890                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
18891                }
18892            }
18893
18894            next_offset += envelope_size;
18895
18896            // Decode the remaining unknown envelopes.
18897            while next_offset < end_offset {
18898                _next_ordinal_to_read += 1;
18899                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
18900                next_offset += envelope_size;
18901            }
18902
18903            Ok(())
18904        }
18905    }
18906
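    // NOTE (editorial sketch, not fidlgen output): the decode impl above only accepts a
    // member stored inline in its envelope when the member's inline size is at most 4
    // bytes (it rejects `inlined != (member_inline_size <= 4)`); anything larger is
    // decoded out of line via `out_of_line_offset`. So the u64 `buffer_collection_id`
    // (8 bytes) is always out of line here, while 4-byte members decode inline. The
    // sketch below restates that rule.
    #[cfg(test)]
    mod inline_bit_example {
        // Mirrors the check `inlined != (member_inline_size <= 4)` above: a
        // member may only live inline in its envelope if it fits in the 4
        // available bytes.
        fn must_be_inlined(member_inline_size: usize) -> bool {
            member_inline_size <= 4
        }

        #[test]
        fn small_members_inline_large_members_out_of_line() {
            assert!(must_be_inlined(4)); // e.g. a 4-byte handle presence word
            assert!(!must_be_inlined(8)); // e.g. the u64 buffer_collection_id above
            assert!(!must_be_inlined(16)); // e.g. a vector header
        }
    }
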
18907    impl BufferCollectionSetConstraintsRequest {
18908        #[inline(always)]
18909        fn max_ordinal_present(&self) -> u64 {
18910            if let Some(_) = self.constraints {
18911                return 1;
18912            }
18913            0
18914        }
18915    }
18916
18917    impl fidl::encoding::ResourceTypeMarker for BufferCollectionSetConstraintsRequest {
18918        type Borrowed<'a> = &'a mut Self;
18919        fn take_or_borrow<'a>(
18920            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18921        ) -> Self::Borrowed<'a> {
18922            value
18923        }
18924    }
18925
18926    unsafe impl fidl::encoding::TypeMarker for BufferCollectionSetConstraintsRequest {
18927        type Owned = Self;
18928
18929        #[inline(always)]
18930        fn inline_align(_context: fidl::encoding::Context) -> usize {
18931            8
18932        }
18933
18934        #[inline(always)]
18935        fn inline_size(_context: fidl::encoding::Context) -> usize {
18936            16
18937        }
18938    }
18939
18940    unsafe impl
18941        fidl::encoding::Encode<
18942            BufferCollectionSetConstraintsRequest,
18943            fidl::encoding::DefaultFuchsiaResourceDialect,
18944        > for &mut BufferCollectionSetConstraintsRequest
18945    {
18946        unsafe fn encode(
18947            self,
18948            encoder: &mut fidl::encoding::Encoder<
18949                '_,
18950                fidl::encoding::DefaultFuchsiaResourceDialect,
18951            >,
18952            offset: usize,
18953            mut depth: fidl::encoding::Depth,
18954        ) -> fidl::Result<()> {
18955            encoder.debug_check_bounds::<BufferCollectionSetConstraintsRequest>(offset);
18956            // Vector header
18957            let max_ordinal: u64 = self.max_ordinal_present();
18958            encoder.write_num(max_ordinal, offset);
18959            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
18960            // Calling encoder.out_of_line_offset(0) is not allowed.
18961            if max_ordinal == 0 {
18962                return Ok(());
18963            }
18964            depth.increment()?;
18965            let envelope_size = 8;
18966            let bytes_len = max_ordinal as usize * envelope_size;
18967            #[allow(unused_variables)]
18968            let offset = encoder.out_of_line_offset(bytes_len);
18969            let mut _prev_end_offset: usize = 0;
18970            if 1 > max_ordinal {
18971                return Ok(());
18972            }
18973
18974            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
18975            // are envelope_size bytes.
18976            let cur_offset: usize = (1 - 1) * envelope_size;
18977
18978            // Zero reserved fields.
18979            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
18980
18981            // Safety:
18982            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
18983            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
18984            //   envelope_size bytes, there is always sufficient room.
18985            fidl::encoding::encode_in_envelope_optional::<
18986                BufferCollectionConstraints,
18987                fidl::encoding::DefaultFuchsiaResourceDialect,
18988            >(
18989                self.constraints
18990                    .as_ref()
18991                    .map(<BufferCollectionConstraints as fidl::encoding::ValueTypeMarker>::borrow),
18992                encoder,
18993                offset + cur_offset,
18994                depth,
18995            )?;
18996
18997            _prev_end_offset = cur_offset + envelope_size;
18998
18999            Ok(())
19000        }
19001    }
19002
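    // NOTE (editorial sketch, not fidlgen output): in the encode impls, value members such
    // as `constraints` or `buffer_collection_id` are passed by shared reference via
    // `ValueTypeMarker::borrow`, while handle-carrying members (endpoints, VMOs, the
    // `buffers` vector) go through `ResourceTypeMarker::take_or_borrow` on `&mut self`,
    // because encoding a resource moves its handles into the outgoing message. The sketch
    // below imitates that split with toy types (`ToyRequest` and its fields are
    // illustrative, and `Option::take` stands in for `take_or_borrow`).
    #[cfg(test)]
    mod borrow_vs_take_example {
        #[derive(Default)]
        struct ToyRequest {
            constraints: Option<u32>,      // value member: borrowed
            token_request: Option<String>, // resource-like member: taken
        }

        fn encode_value(member: Option<&u32>) -> bool {
            member.is_some()
        }

        // Taking the resource out mirrors how encoding consumes handles; the
        // field is left `None` afterwards.
        fn encode_resource(member: Option<String>) -> bool {
            member.is_some()
        }

        #[test]
        fn resources_are_consumed_values_are_not() {
            let mut req = ToyRequest {
                constraints: Some(1),
                token_request: Some("server end".to_string()),
            };
            assert!(encode_value(req.constraints.as_ref()));
            assert!(encode_resource(req.token_request.take()));
            // The value member survives encoding; the resource member does not.
            assert!(req.constraints.is_some());
            assert!(req.token_request.is_none());
        }
    }
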
19003    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
19004        for BufferCollectionSetConstraintsRequest
19005    {
19006        #[inline(always)]
19007        fn new_empty() -> Self {
19008            Self::default()
19009        }
19010
19011        unsafe fn decode(
19012            &mut self,
19013            decoder: &mut fidl::encoding::Decoder<
19014                '_,
19015                fidl::encoding::DefaultFuchsiaResourceDialect,
19016            >,
19017            offset: usize,
19018            mut depth: fidl::encoding::Depth,
19019        ) -> fidl::Result<()> {
19020            decoder.debug_check_bounds::<Self>(offset);
19021            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
19022                None => return Err(fidl::Error::NotNullable),
19023                Some(len) => len,
19024            };
19025            // Calling decoder.out_of_line_offset(0) is not allowed.
19026            if len == 0 {
19027                return Ok(());
19028            };
19029            depth.increment()?;
19030            let envelope_size = 8;
19031            let bytes_len = len * envelope_size;
19032            let offset = decoder.out_of_line_offset(bytes_len)?;
19033            // Decode the envelope for each type.
19034            let mut _next_ordinal_to_read = 0;
19035            let mut next_offset = offset;
19036            let end_offset = offset + bytes_len;
19037            _next_ordinal_to_read += 1;
19038            if next_offset >= end_offset {
19039                return Ok(());
19040            }
19041
19042            // Decode unknown envelopes for gaps in ordinals.
19043            while _next_ordinal_to_read < 1 {
19044                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19045                _next_ordinal_to_read += 1;
19046                next_offset += envelope_size;
19047            }
19048
19049            let next_out_of_line = decoder.next_out_of_line();
19050            let handles_before = decoder.remaining_handles();
19051            if let Some((inlined, num_bytes, num_handles)) =
19052                fidl::encoding::decode_envelope_header(decoder, next_offset)?
19053            {
19054                let member_inline_size =
19055                    <BufferCollectionConstraints as fidl::encoding::TypeMarker>::inline_size(
19056                        decoder.context,
19057                    );
19058                if inlined != (member_inline_size <= 4) {
19059                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
19060                }
19061                let inner_offset;
19062                let mut inner_depth = depth.clone();
19063                if inlined {
19064                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
19065                    inner_offset = next_offset;
19066                } else {
19067                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
19068                    inner_depth.increment()?;
19069                }
19070                let val_ref = self.constraints.get_or_insert_with(|| {
19071                    fidl::new_empty!(
19072                        BufferCollectionConstraints,
19073                        fidl::encoding::DefaultFuchsiaResourceDialect
19074                    )
19075                });
19076                fidl::decode!(
19077                    BufferCollectionConstraints,
19078                    fidl::encoding::DefaultFuchsiaResourceDialect,
19079                    val_ref,
19080                    decoder,
19081                    inner_offset,
19082                    inner_depth
19083                )?;
19084                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
19085                {
19086                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
19087                }
19088                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
19089                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
19090                }
19091            }
19092
19093            next_offset += envelope_size;
19094
19095            // Decode the remaining unknown envelopes.
19096            while next_offset < end_offset {
19097                _next_ordinal_to_read += 1;
19098                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19099                next_offset += envelope_size;
19100            }
19101
19102            Ok(())
19103        }
19104    }
19105
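    // NOTE (editorial sketch, not fidlgen output): on decode, ordinals this binding does
    // not know are skipped with `decode_unknown_envelope`, both for gaps before a known
    // ordinal and for everything past the last known ordinal. That is what keeps these
    // tables forward-compatible when a peer sends newer fields. The sketch below simulates
    // walking the envelope array of a table whose peer knows more ordinals than we do
    // (function and variable names are illustrative).
    #[cfg(test)]
    mod unknown_envelope_example {
        // Envelopes for ordinals we know are "decoded"; everything else is
        // skipped, mirroring the `decode_unknown_envelope` loops above.
        fn walk_envelopes(known_ordinals: &[u64], max_ordinal_on_wire: u64) -> (u32, u32) {
            let mut decoded = 0;
            let mut skipped = 0;
            for ordinal in 1..=max_ordinal_on_wire {
                if known_ordinals.contains(&ordinal) {
                    decoded += 1;
                } else {
                    skipped += 1;
                }
            }
            (decoded, skipped)
        }

        #[test]
        fn newer_peers_fields_are_skipped_not_rejected() {
            // This binding knows only ordinal 1; the peer sent ordinals 1..=3.
            let (decoded, skipped) = walk_envelopes(&[1], 3);
            assert_eq!(decoded, 1);
            assert_eq!(skipped, 2);
        }
    }
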
19106    impl BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
19107        #[inline(always)]
19108        fn max_ordinal_present(&self) -> u64 {
19109            if let Some(_) = self.group_request {
19110                return 1;
19111            }
19112            0
19113        }
19114    }
19115
19116    impl fidl::encoding::ResourceTypeMarker
19117        for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
19118    {
19119        type Borrowed<'a> = &'a mut Self;
19120        fn take_or_borrow<'a>(
19121            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
19122        ) -> Self::Borrowed<'a> {
19123            value
19124        }
19125    }
19126
19127    unsafe impl fidl::encoding::TypeMarker
19128        for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
19129    {
19130        type Owned = Self;
19131
19132        #[inline(always)]
19133        fn inline_align(_context: fidl::encoding::Context) -> usize {
19134            8
19135        }
19136
19137        #[inline(always)]
19138        fn inline_size(_context: fidl::encoding::Context) -> usize {
19139            16
19140        }
19141    }
19142
19143    unsafe impl
19144        fidl::encoding::Encode<
19145            BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
19146            fidl::encoding::DefaultFuchsiaResourceDialect,
19147        > for &mut BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
19148    {
19149        unsafe fn encode(
19150            self,
19151            encoder: &mut fidl::encoding::Encoder<
19152                '_,
19153                fidl::encoding::DefaultFuchsiaResourceDialect,
19154            >,
19155            offset: usize,
19156            mut depth: fidl::encoding::Depth,
19157        ) -> fidl::Result<()> {
19158            encoder
19159                .debug_check_bounds::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
19160                    offset,
19161                );
19162            // Vector header
19163            let max_ordinal: u64 = self.max_ordinal_present();
19164            encoder.write_num(max_ordinal, offset);
19165            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
19166            // Calling encoder.out_of_line_offset(0) is not allowed.
19167            if max_ordinal == 0 {
19168                return Ok(());
19169            }
19170            depth.increment()?;
19171            let envelope_size = 8;
19172            let bytes_len = max_ordinal as usize * envelope_size;
19173            #[allow(unused_variables)]
19174            let offset = encoder.out_of_line_offset(bytes_len);
19175            let mut _prev_end_offset: usize = 0;
19176            if 1 > max_ordinal {
19177                return Ok(());
19178            }
19179
19180            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
19181            // are envelope_size bytes.
19182            let cur_offset: usize = (1 - 1) * envelope_size;
19183
19184            // Zero reserved fields.
19185            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
19186
19187            // Safety:
19188            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
19189            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
19190            //   envelope_size bytes, there is always sufficient room.
19191            fidl::encoding::encode_in_envelope_optional::<
19192                fidl::encoding::Endpoint<
19193                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
19194                >,
19195                fidl::encoding::DefaultFuchsiaResourceDialect,
19196            >(
19197                self.group_request.as_mut().map(
19198                    <fidl::encoding::Endpoint<
19199                        fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
19200                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
19201                ),
19202                encoder,
19203                offset + cur_offset,
19204                depth,
19205            )?;
19206
19207            _prev_end_offset = cur_offset + envelope_size;
19208
19209            Ok(())
19210        }
19211    }
19212
19213    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
19214        for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
19215    {
19216        #[inline(always)]
19217        fn new_empty() -> Self {
19218            Self::default()
19219        }
19220
19221        unsafe fn decode(
19222            &mut self,
19223            decoder: &mut fidl::encoding::Decoder<
19224                '_,
19225                fidl::encoding::DefaultFuchsiaResourceDialect,
19226            >,
19227            offset: usize,
19228            mut depth: fidl::encoding::Depth,
19229        ) -> fidl::Result<()> {
19230            decoder.debug_check_bounds::<Self>(offset);
19231            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
19232                None => return Err(fidl::Error::NotNullable),
19233                Some(len) => len,
19234            };
19235            // Calling decoder.out_of_line_offset(0) is not allowed.
19236            if len == 0 {
19237                return Ok(());
19238            };
19239            depth.increment()?;
19240            let envelope_size = 8;
19241            let bytes_len = len * envelope_size;
19242            let offset = decoder.out_of_line_offset(bytes_len)?;
19243            // Decode the envelope for each type.
19244            let mut _next_ordinal_to_read = 0;
19245            let mut next_offset = offset;
19246            let end_offset = offset + bytes_len;
19247            _next_ordinal_to_read += 1;
19248            if next_offset >= end_offset {
19249                return Ok(());
19250            }
19251
19252            // Decode unknown envelopes for gaps in ordinals.
19253            while _next_ordinal_to_read < 1 {
19254                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19255                _next_ordinal_to_read += 1;
19256                next_offset += envelope_size;
19257            }
19258
19259            let next_out_of_line = decoder.next_out_of_line();
19260            let handles_before = decoder.remaining_handles();
19261            if let Some((inlined, num_bytes, num_handles)) =
19262                fidl::encoding::decode_envelope_header(decoder, next_offset)?
19263            {
19264                let member_inline_size = <fidl::encoding::Endpoint<
19265                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
19266                > as fidl::encoding::TypeMarker>::inline_size(
19267                    decoder.context
19268                );
19269                if inlined != (member_inline_size <= 4) {
19270                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
19271                }
19272                let inner_offset;
19273                let mut inner_depth = depth.clone();
19274                if inlined {
19275                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
19276                    inner_offset = next_offset;
19277                } else {
19278                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
19279                    inner_depth.increment()?;
19280                }
19281                let val_ref = self.group_request.get_or_insert_with(|| {
19282                    fidl::new_empty!(
19283                        fidl::encoding::Endpoint<
19284                            fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
19285                        >,
19286                        fidl::encoding::DefaultFuchsiaResourceDialect
19287                    )
19288                });
19289                fidl::decode!(
19290                    fidl::encoding::Endpoint<
19291                        fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
19292                    >,
19293                    fidl::encoding::DefaultFuchsiaResourceDialect,
19294                    val_ref,
19295                    decoder,
19296                    inner_offset,
19297                    inner_depth
19298                )?;
19299                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
19300                {
19301                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
19302                }
19303                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
19304                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
19305                }
19306            }
19307
19308            next_offset += envelope_size;
19309
19310            // Decode the remaining unknown envelopes.
19311            while next_offset < end_offset {
19312                _next_ordinal_to_read += 1;
19313                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19314                next_offset += envelope_size;
19315            }
19316
19317            Ok(())
19318        }
19319    }
19320
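    // NOTE (editorial sketch, not fidlgen output): after each envelope is decoded, the
    // code above cross-checks the envelope header's claims against what the decoder
    // actually consumed: for out-of-line members, `next_out_of_line` must have advanced
    // by exactly `num_bytes`, and the handles taken must equal `num_handles`; otherwise
    // it returns `InvalidNumBytesInEnvelope` / `InvalidNumHandlesInEnvelope`. The sketch
    // below restates both checks as plain arithmetic (names are illustrative).
    #[cfg(test)]
    mod envelope_accounting_example {
        fn bytes_account(
            out_of_line_before: usize,
            out_of_line_after: usize,
            claimed_num_bytes: u32,
        ) -> bool {
            out_of_line_after == out_of_line_before + claimed_num_bytes as usize
        }

        fn handles_account(
            handles_before: usize,
            handles_remaining_after: usize,
            claimed_num_handles: u32,
        ) -> bool {
            handles_before == handles_remaining_after + claimed_num_handles as usize
        }

        #[test]
        fn mismatched_claims_are_detected() {
            // Decoding consumed 24 out-of-line bytes and 1 handle.
            assert!(bytes_account(96, 120, 24));
            assert!(handles_account(2, 1, 1));
            // A lying envelope header would trip the corresponding error above.
            assert!(!bytes_account(96, 120, 16));
            assert!(!handles_account(2, 1, 0));
        }
    }
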
19321    impl BufferCollectionTokenDuplicateRequest {
19322        #[inline(always)]
19323        fn max_ordinal_present(&self) -> u64 {
19324            if let Some(_) = self.token_request {
19325                return 2;
19326            }
19327            if let Some(_) = self.rights_attenuation_mask {
19328                return 1;
19329            }
19330            0
19331        }
19332    }
19333
19334    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateRequest {
19335        type Borrowed<'a> = &'a mut Self;
19336        fn take_or_borrow<'a>(
19337            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
19338        ) -> Self::Borrowed<'a> {
19339            value
19340        }
19341    }
19342
19343    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateRequest {
19344        type Owned = Self;
19345
19346        #[inline(always)]
19347        fn inline_align(_context: fidl::encoding::Context) -> usize {
19348            8
19349        }
19350
19351        #[inline(always)]
19352        fn inline_size(_context: fidl::encoding::Context) -> usize {
19353            16
19354        }
19355    }
19356
19357    unsafe impl
19358        fidl::encoding::Encode<
19359            BufferCollectionTokenDuplicateRequest,
19360            fidl::encoding::DefaultFuchsiaResourceDialect,
19361        > for &mut BufferCollectionTokenDuplicateRequest
19362    {
19363        unsafe fn encode(
19364            self,
19365            encoder: &mut fidl::encoding::Encoder<
19366                '_,
19367                fidl::encoding::DefaultFuchsiaResourceDialect,
19368            >,
19369            offset: usize,
19370            mut depth: fidl::encoding::Depth,
19371        ) -> fidl::Result<()> {
19372            encoder.debug_check_bounds::<BufferCollectionTokenDuplicateRequest>(offset);
19373            // Vector header
19374            let max_ordinal: u64 = self.max_ordinal_present();
19375            encoder.write_num(max_ordinal, offset);
19376            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
19377            // Calling encoder.out_of_line_offset(0) is not allowed.
19378            if max_ordinal == 0 {
19379                return Ok(());
19380            }
19381            depth.increment()?;
19382            let envelope_size = 8;
19383            let bytes_len = max_ordinal as usize * envelope_size;
19384            #[allow(unused_variables)]
19385            let offset = encoder.out_of_line_offset(bytes_len);
19386            let mut _prev_end_offset: usize = 0;
19387            if 1 > max_ordinal {
19388                return Ok(());
19389            }
19390
19391            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
19392            // are envelope_size bytes.
19393            let cur_offset: usize = (1 - 1) * envelope_size;
19394
19395            // Zero reserved fields.
19396            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
19397
19398            // Safety:
19399            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
19400            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
19401            //   envelope_size bytes, there is always sufficient room.
19402            fidl::encoding::encode_in_envelope_optional::<
19403                fidl::Rights,
19404                fidl::encoding::DefaultFuchsiaResourceDialect,
19405            >(
19406                self.rights_attenuation_mask
19407                    .as_ref()
19408                    .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
19409                encoder,
19410                offset + cur_offset,
19411                depth,
19412            )?;
19413
19414            _prev_end_offset = cur_offset + envelope_size;
19415            if 2 > max_ordinal {
19416                return Ok(());
19417            }
19418
19419            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
19420            // are envelope_size bytes.
19421            let cur_offset: usize = (2 - 1) * envelope_size;
19422
19423            // Zero reserved fields.
19424            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
19425
19426            // Safety:
19427            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
19428            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
19429            //   envelope_size bytes, there is always sufficient room.
19430            fidl::encoding::encode_in_envelope_optional::<
19431                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
19432                fidl::encoding::DefaultFuchsiaResourceDialect,
19433            >(
19434                self.token_request.as_mut().map(
19435                    <fidl::encoding::Endpoint<
19436                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19437                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
19438                ),
19439                encoder,
19440                offset + cur_offset,
19441                depth,
19442            )?;
19443
19444            _prev_end_offset = cur_offset + envelope_size;
19445
19446            Ok(())
19447        }
19448    }
19449
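    // NOTE (editorial sketch, not fidlgen output): `BufferCollectionTokenDuplicateRequest`
    // above carries `rights_attenuation_mask` as a `fidl::Rights` bitflag. The sketch
    // below assumes the usual Zircon convention that attenuation is a bitwise AND of the
    // original rights with the mask, so a mask can only remove rights, never add them.
    // The rights constants here are toy values, not real `zx_rights_t` bits.
    #[cfg(test)]
    mod rights_attenuation_example {
        const RIGHT_READ: u32 = 1 << 0;
        const RIGHT_WRITE: u32 = 1 << 1;
        const RIGHT_DUPLICATE: u32 = 1 << 2;

        // Assumed semantics: duplicate rights = original rights AND mask.
        fn attenuate(original: u32, mask: u32) -> u32 {
            original & mask
        }

        #[test]
        fn mask_can_only_remove_rights() {
            let original = RIGHT_READ | RIGHT_WRITE | RIGHT_DUPLICATE;
            let read_only = attenuate(original, RIGHT_READ);
            assert_eq!(read_only, RIGHT_READ);
            // Bits set in the mask but absent from the original add nothing.
            assert_eq!(attenuate(RIGHT_READ, RIGHT_READ | RIGHT_WRITE), RIGHT_READ);
        }
    }
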
19450    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
19451        for BufferCollectionTokenDuplicateRequest
19452    {
19453        #[inline(always)]
19454        fn new_empty() -> Self {
19455            Self::default()
19456        }
19457
19458        unsafe fn decode(
19459            &mut self,
19460            decoder: &mut fidl::encoding::Decoder<
19461                '_,
19462                fidl::encoding::DefaultFuchsiaResourceDialect,
19463            >,
19464            offset: usize,
19465            mut depth: fidl::encoding::Depth,
19466        ) -> fidl::Result<()> {
19467            decoder.debug_check_bounds::<Self>(offset);
19468            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
19469                None => return Err(fidl::Error::NotNullable),
19470                Some(len) => len,
19471            };
19472            // Calling decoder.out_of_line_offset(0) is not allowed.
19473            if len == 0 {
19474                return Ok(());
19475            };
19476            depth.increment()?;
19477            let envelope_size = 8;
19478            let bytes_len = len * envelope_size;
19479            let offset = decoder.out_of_line_offset(bytes_len)?;
19480            // Decode the envelope for each type.
19481            let mut _next_ordinal_to_read = 0;
19482            let mut next_offset = offset;
19483            let end_offset = offset + bytes_len;
19484            _next_ordinal_to_read += 1;
19485            if next_offset >= end_offset {
19486                return Ok(());
19487            }
19488
19489            // Decode unknown envelopes for gaps in ordinals.
19490            while _next_ordinal_to_read < 1 {
19491                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19492                _next_ordinal_to_read += 1;
19493                next_offset += envelope_size;
19494            }
19495
19496            let next_out_of_line = decoder.next_out_of_line();
19497            let handles_before = decoder.remaining_handles();
19498            if let Some((inlined, num_bytes, num_handles)) =
19499                fidl::encoding::decode_envelope_header(decoder, next_offset)?
19500            {
19501                let member_inline_size =
19502                    <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
19503                if inlined != (member_inline_size <= 4) {
19504                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
19505                }
19506                let inner_offset;
19507                let mut inner_depth = depth.clone();
19508                if inlined {
19509                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
19510                    inner_offset = next_offset;
19511                } else {
19512                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
19513                    inner_depth.increment()?;
19514                }
19515                let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
19516                    fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
19517                });
19518                fidl::decode!(
19519                    fidl::Rights,
19520                    fidl::encoding::DefaultFuchsiaResourceDialect,
19521                    val_ref,
19522                    decoder,
19523                    inner_offset,
19524                    inner_depth
19525                )?;
19526                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
19527                {
19528                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
19529                }
19530                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
19531                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
19532                }
19533            }
19534
19535            next_offset += envelope_size;
19536            _next_ordinal_to_read += 1;
19537            if next_offset >= end_offset {
19538                return Ok(());
19539            }
19540
19541            // Decode unknown envelopes for gaps in ordinals.
19542            while _next_ordinal_to_read < 2 {
19543                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19544                _next_ordinal_to_read += 1;
19545                next_offset += envelope_size;
19546            }
19547
19548            let next_out_of_line = decoder.next_out_of_line();
19549            let handles_before = decoder.remaining_handles();
19550            if let Some((inlined, num_bytes, num_handles)) =
19551                fidl::encoding::decode_envelope_header(decoder, next_offset)?
19552            {
19553                let member_inline_size = <fidl::encoding::Endpoint<
19554                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19555                > as fidl::encoding::TypeMarker>::inline_size(
19556                    decoder.context
19557                );
19558                if inlined != (member_inline_size <= 4) {
19559                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
19560                }
19561                let inner_offset;
19562                let mut inner_depth = depth.clone();
19563                if inlined {
19564                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
19565                    inner_offset = next_offset;
19566                } else {
19567                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
19568                    inner_depth.increment()?;
19569                }
19570                let val_ref = self.token_request.get_or_insert_with(|| {
19571                    fidl::new_empty!(
19572                        fidl::encoding::Endpoint<
19573                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19574                        >,
19575                        fidl::encoding::DefaultFuchsiaResourceDialect
19576                    )
19577                });
19578                fidl::decode!(
19579                    fidl::encoding::Endpoint<
19580                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19581                    >,
19582                    fidl::encoding::DefaultFuchsiaResourceDialect,
19583                    val_ref,
19584                    decoder,
19585                    inner_offset,
19586                    inner_depth
19587                )?;
19588                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
19589                {
19590                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
19591                }
19592                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
19593                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
19594                }
19595            }
19596
19597            next_offset += envelope_size;
19598
19599            // Decode the remaining unknown envelopes.
19600            while next_offset < end_offset {
19601                _next_ordinal_to_read += 1;
19602                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19603                next_offset += envelope_size;
19604            }
19605
19606            Ok(())
19607        }
19608    }
19609
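    // NOTE (editorial sketch, not fidlgen output): when a known envelope is present, the
    // decode impls above materialize the target field with
    // `get_or_insert_with(|| fidl::new_empty!(...))` and then decode in place into that
    // slot; a field whose envelope never appears simply stays `None`. The sketch below
    // imitates that with an `Option<u64>` and a stand-in for `fidl::decode!`.
    #[cfg(test)]
    mod decode_into_option_example {
        // Stand-in for `fidl::decode!` writing into a freshly created value.
        fn fake_decode_in_place(slot: &mut u64, wire_value: u64) {
            *slot = wire_value;
        }

        #[test]
        fn present_envelopes_populate_the_field_absent_ones_leave_none() {
            let mut field: Option<u64> = None;

            // Envelope present on the wire: create an empty value, then decode
            // into it, mirroring `get_or_insert_with` + `fidl::decode!` above.
            let envelope_present = true;
            if envelope_present {
                let val_ref = field.get_or_insert_with(u64::default);
                fake_decode_in_place(val_ref, 42);
            }
            assert_eq!(field, Some(42));

            // A field whose envelope never appears simply stays `None`.
            let untouched: Option<u64> = None;
            assert!(untouched.is_none());
        }
    }
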
19610    impl BufferCollectionTokenGroupCreateChildRequest {
19611        #[inline(always)]
19612        fn max_ordinal_present(&self) -> u64 {
19613            if let Some(_) = self.rights_attenuation_mask {
19614                return 2;
19615            }
19616            if let Some(_) = self.token_request {
19617                return 1;
19618            }
19619            0
19620        }
19621    }
19622
19623    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildRequest {
19624        type Borrowed<'a> = &'a mut Self;
19625        fn take_or_borrow<'a>(
19626            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
19627        ) -> Self::Borrowed<'a> {
19628            value
19629        }
19630    }
19631
19632    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildRequest {
19633        type Owned = Self;
19634
19635        #[inline(always)]
19636        fn inline_align(_context: fidl::encoding::Context) -> usize {
19637            8
19638        }
19639
19640        #[inline(always)]
19641        fn inline_size(_context: fidl::encoding::Context) -> usize {
19642            16
19643        }
19644    }
19645
19646    unsafe impl
19647        fidl::encoding::Encode<
19648            BufferCollectionTokenGroupCreateChildRequest,
19649            fidl::encoding::DefaultFuchsiaResourceDialect,
19650        > for &mut BufferCollectionTokenGroupCreateChildRequest
19651    {
19652        unsafe fn encode(
19653            self,
19654            encoder: &mut fidl::encoding::Encoder<
19655                '_,
19656                fidl::encoding::DefaultFuchsiaResourceDialect,
19657            >,
19658            offset: usize,
19659            mut depth: fidl::encoding::Depth,
19660        ) -> fidl::Result<()> {
19661            encoder.debug_check_bounds::<BufferCollectionTokenGroupCreateChildRequest>(offset);
19662            // Vector header
19663            let max_ordinal: u64 = self.max_ordinal_present();
19664            encoder.write_num(max_ordinal, offset);
19665            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
19666            // Calling encoder.out_of_line_offset(0) is not allowed.
19667            if max_ordinal == 0 {
19668                return Ok(());
19669            }
19670            depth.increment()?;
19671            let envelope_size = 8;
19672            let bytes_len = max_ordinal as usize * envelope_size;
19673            #[allow(unused_variables)]
19674            let offset = encoder.out_of_line_offset(bytes_len);
19675            let mut _prev_end_offset: usize = 0;
19676            if 1 > max_ordinal {
19677                return Ok(());
19678            }
19679
19680            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
19681            // are envelope_size bytes.
19682            let cur_offset: usize = (1 - 1) * envelope_size;
19683
19684            // Zero reserved fields.
19685            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
19686
19687            // Safety:
19688            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
19689            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
19690            //   envelope_size bytes, there is always sufficient room.
19691            fidl::encoding::encode_in_envelope_optional::<
19692                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
19693                fidl::encoding::DefaultFuchsiaResourceDialect,
19694            >(
19695                self.token_request.as_mut().map(
19696                    <fidl::encoding::Endpoint<
19697                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19698                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
19699                ),
19700                encoder,
19701                offset + cur_offset,
19702                depth,
19703            )?;
19704
19705            _prev_end_offset = cur_offset + envelope_size;
19706            if 2 > max_ordinal {
19707                return Ok(());
19708            }
19709
19710            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
19711            // are envelope_size bytes.
19712            let cur_offset: usize = (2 - 1) * envelope_size;
19713
19714            // Zero reserved fields.
19715            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
19716
19717            // Safety:
19718            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
19719            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
19720            //   envelope_size bytes, there is always sufficient room.
19721            fidl::encoding::encode_in_envelope_optional::<
19722                fidl::Rights,
19723                fidl::encoding::DefaultFuchsiaResourceDialect,
19724            >(
19725                self.rights_attenuation_mask
19726                    .as_ref()
19727                    .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
19728                encoder,
19729                offset + cur_offset,
19730                depth,
19731            )?;
19732
19733            _prev_end_offset = cur_offset + envelope_size;
19734
19735            Ok(())
19736        }
19737    }
19738
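    // NOTE (editorial sketch, not fidlgen output): both the encode and decode impls above
    // call `depth.increment()?` every time they step into out-of-line content, so deeply
    // nested messages are rejected instead of recursing without bound. The sketch below
    // models that with a toy counter; the limit of 32 is an assumption taken from the
    // FIDL wire-format specification, and the real bookkeeping lives in
    // `fidl::encoding::Depth`.
    #[cfg(test)]
    mod depth_limit_example {
        const MAX_DEPTH: u32 = 32;

        #[derive(Clone, Copy, Default)]
        struct ToyDepth(u32);

        impl ToyDepth {
            // Mirrors the `depth.increment()?` calls above: stepping into one
            // more level of out-of-line data either succeeds or reports that
            // the message nests too deeply.
            fn increment(&mut self) -> Result<(), &'static str> {
                self.0 += 1;
                if self.0 > MAX_DEPTH {
                    Err("max out-of-line recursion depth exceeded")
                } else {
                    Ok(())
                }
            }
        }

        #[test]
        fn deep_nesting_is_rejected() {
            let mut depth = ToyDepth::default();
            for _ in 0..MAX_DEPTH {
                assert!(depth.increment().is_ok());
            }
            assert!(depth.increment().is_err());
        }
    }
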
19739    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
19740        for BufferCollectionTokenGroupCreateChildRequest
19741    {
19742        #[inline(always)]
19743        fn new_empty() -> Self {
19744            Self::default()
19745        }
19746
19747        unsafe fn decode(
19748            &mut self,
19749            decoder: &mut fidl::encoding::Decoder<
19750                '_,
19751                fidl::encoding::DefaultFuchsiaResourceDialect,
19752            >,
19753            offset: usize,
19754            mut depth: fidl::encoding::Depth,
19755        ) -> fidl::Result<()> {
19756            decoder.debug_check_bounds::<Self>(offset);
19757            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
19758                None => return Err(fidl::Error::NotNullable),
19759                Some(len) => len,
19760            };
19761            // Calling decoder.out_of_line_offset(0) is not allowed.
19762            if len == 0 {
19763                return Ok(());
19764            };
19765            depth.increment()?;
19766            let envelope_size = 8;
19767            let bytes_len = len * envelope_size;
19768            let offset = decoder.out_of_line_offset(bytes_len)?;
19769            // Decode the envelope for each type.
19770            let mut _next_ordinal_to_read = 0;
19771            let mut next_offset = offset;
19772            let end_offset = offset + bytes_len;
19773            _next_ordinal_to_read += 1;
19774            if next_offset >= end_offset {
19775                return Ok(());
19776            }
19777
19778            // Decode unknown envelopes for gaps in ordinals.
19779            while _next_ordinal_to_read < 1 {
19780                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19781                _next_ordinal_to_read += 1;
19782                next_offset += envelope_size;
19783            }
19784
19785            let next_out_of_line = decoder.next_out_of_line();
19786            let handles_before = decoder.remaining_handles();
19787            if let Some((inlined, num_bytes, num_handles)) =
19788                fidl::encoding::decode_envelope_header(decoder, next_offset)?
19789            {
19790                let member_inline_size = <fidl::encoding::Endpoint<
19791                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19792                > as fidl::encoding::TypeMarker>::inline_size(
19793                    decoder.context
19794                );
19795                if inlined != (member_inline_size <= 4) {
19796                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
19797                }
19798                let inner_offset;
19799                let mut inner_depth = depth.clone();
19800                if inlined {
19801                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
19802                    inner_offset = next_offset;
19803                } else {
19804                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
19805                    inner_depth.increment()?;
19806                }
19807                let val_ref = self.token_request.get_or_insert_with(|| {
19808                    fidl::new_empty!(
19809                        fidl::encoding::Endpoint<
19810                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19811                        >,
19812                        fidl::encoding::DefaultFuchsiaResourceDialect
19813                    )
19814                });
19815                fidl::decode!(
19816                    fidl::encoding::Endpoint<
19817                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19818                    >,
19819                    fidl::encoding::DefaultFuchsiaResourceDialect,
19820                    val_ref,
19821                    decoder,
19822                    inner_offset,
19823                    inner_depth
19824                )?;
19825                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
19826                {
19827                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
19828                }
19829                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
19830                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
19831                }
19832            }
19833
19834            next_offset += envelope_size;
19835            _next_ordinal_to_read += 1;
19836            if next_offset >= end_offset {
19837                return Ok(());
19838            }
19839
19840            // Decode unknown envelopes for gaps in ordinals.
19841            while _next_ordinal_to_read < 2 {
19842                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19843                _next_ordinal_to_read += 1;
19844                next_offset += envelope_size;
19845            }
19846
19847            let next_out_of_line = decoder.next_out_of_line();
19848            let handles_before = decoder.remaining_handles();
19849            if let Some((inlined, num_bytes, num_handles)) =
19850                fidl::encoding::decode_envelope_header(decoder, next_offset)?
19851            {
19852                let member_inline_size =
19853                    <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
19854                if inlined != (member_inline_size <= 4) {
19855                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
19856                }
19857                let inner_offset;
19858                let mut inner_depth = depth.clone();
19859                if inlined {
19860                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
19861                    inner_offset = next_offset;
19862                } else {
19863                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
19864                    inner_depth.increment()?;
19865                }
19866                let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
19867                    fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
19868                });
19869                fidl::decode!(
19870                    fidl::Rights,
19871                    fidl::encoding::DefaultFuchsiaResourceDialect,
19872                    val_ref,
19873                    decoder,
19874                    inner_offset,
19875                    inner_depth
19876                )?;
19877                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
19878                {
19879                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
19880                }
19881                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
19882                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
19883                }
19884            }
19885
19886            next_offset += envelope_size;
19887
19888            // Decode the remaining unknown envelopes.
19889            while next_offset < end_offset {
19890                _next_ordinal_to_read += 1;
19891                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19892                next_offset += envelope_size;
19893            }
19894
19895            Ok(())
19896        }
19897    }
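
    // The encode/decode implementations in this file all follow the same FIDL table
    // wire-format pattern, which can be read directly off the generated code: a 16-byte
    // inline part holding the max present ordinal and ALLOC_PRESENT_U64, then one 8-byte
    // envelope per ordinal out of line. A member whose inline size is at most 4 bytes is
    // stored inside its envelope ("inlined"); larger members go out of line, and the
    // envelope's recorded byte/handle counts are verified after decoding. Unknown ordinals
    // are skipped with decode_unknown_envelope. Roughly (a sketch, not normative):
    //
    //   inline (16 bytes):  [ max_ordinal: u64 ][ ALLOC_PRESENT_U64 ]
    //   out of line:        [ envelope 1 ][ envelope 2 ] ... [ envelope max_ordinal ]
    //                         8 bytes each; either the value itself (inlined) or
    //                         num_bytes/num_handles plus an out-of-line payload.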
19898
19899    impl BufferCollectionTokenGroupCreateChildrenSyncResponse {
19900        #[inline(always)]
19901        fn max_ordinal_present(&self) -> u64 {
19902            if let Some(_) = self.tokens {
19903                return 1;
19904            }
19905            0
19906        }
19907    }
19908
19909    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
19910        type Borrowed<'a> = &'a mut Self;
19911        fn take_or_borrow<'a>(
19912            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
19913        ) -> Self::Borrowed<'a> {
19914            value
19915        }
19916    }
19917
19918    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
19919        type Owned = Self;
19920
19921        #[inline(always)]
19922        fn inline_align(_context: fidl::encoding::Context) -> usize {
19923            8
19924        }
19925
19926        #[inline(always)]
19927        fn inline_size(_context: fidl::encoding::Context) -> usize {
19928            16
19929        }
19930    }
19931
19932    unsafe impl
19933        fidl::encoding::Encode<
19934            BufferCollectionTokenGroupCreateChildrenSyncResponse,
19935            fidl::encoding::DefaultFuchsiaResourceDialect,
19936        > for &mut BufferCollectionTokenGroupCreateChildrenSyncResponse
19937    {
19938        unsafe fn encode(
19939            self,
19940            encoder: &mut fidl::encoding::Encoder<
19941                '_,
19942                fidl::encoding::DefaultFuchsiaResourceDialect,
19943            >,
19944            offset: usize,
19945            mut depth: fidl::encoding::Depth,
19946        ) -> fidl::Result<()> {
19947            encoder
19948                .debug_check_bounds::<BufferCollectionTokenGroupCreateChildrenSyncResponse>(offset);
19949            // Vector header
19950            let max_ordinal: u64 = self.max_ordinal_present();
19951            encoder.write_num(max_ordinal, offset);
19952            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
19953            // Calling encoder.out_of_line_offset(0) is not allowed.
19954            if max_ordinal == 0 {
19955                return Ok(());
19956            }
19957            depth.increment()?;
19958            let envelope_size = 8;
19959            let bytes_len = max_ordinal as usize * envelope_size;
19960            #[allow(unused_variables)]
19961            let offset = encoder.out_of_line_offset(bytes_len);
19962            let mut _prev_end_offset: usize = 0;
19963            if 1 > max_ordinal {
19964                return Ok(());
19965            }
19966
19967            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
19968            // are envelope_size bytes.
19969            let cur_offset: usize = (1 - 1) * envelope_size;
19970
19971            // Zero reserved fields.
19972            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
19973
19974            // Safety:
19975            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
19976            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
19977            //   envelope_size bytes, there is always sufficient room.
19978            fidl::encoding::encode_in_envelope_optional::<
19979                fidl::encoding::Vector<
19980                    fidl::encoding::Endpoint<
19981                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
19982                    >,
19983                    64,
19984                >,
19985                fidl::encoding::DefaultFuchsiaResourceDialect,
19986            >(
19987                self.tokens.as_mut().map(
19988                    <fidl::encoding::Vector<
19989                        fidl::encoding::Endpoint<
19990                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
19991                        >,
19992                        64,
19993                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
19994                ),
19995                encoder,
19996                offset + cur_offset,
19997                depth,
19998            )?;
19999
20000            _prev_end_offset = cur_offset + envelope_size;
20001
20002            Ok(())
20003        }
20004    }
20005
20006    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
20007        for BufferCollectionTokenGroupCreateChildrenSyncResponse
20008    {
20009        #[inline(always)]
20010        fn new_empty() -> Self {
20011            Self::default()
20012        }
20013
20014        unsafe fn decode(
20015            &mut self,
20016            decoder: &mut fidl::encoding::Decoder<
20017                '_,
20018                fidl::encoding::DefaultFuchsiaResourceDialect,
20019            >,
20020            offset: usize,
20021            mut depth: fidl::encoding::Depth,
20022        ) -> fidl::Result<()> {
20023            decoder.debug_check_bounds::<Self>(offset);
20024            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
20025                None => return Err(fidl::Error::NotNullable),
20026                Some(len) => len,
20027            };
20028            // Calling decoder.out_of_line_offset(0) is not allowed.
20029            if len == 0 {
20030                return Ok(());
20031            };
20032            depth.increment()?;
20033            let envelope_size = 8;
20034            let bytes_len = len * envelope_size;
20035            let offset = decoder.out_of_line_offset(bytes_len)?;
20036            // Decode the envelope for each field, in ordinal order.
20037            let mut _next_ordinal_to_read = 0;
20038            let mut next_offset = offset;
20039            let end_offset = offset + bytes_len;
20040            _next_ordinal_to_read += 1;
20041            if next_offset >= end_offset {
20042                return Ok(());
20043            }
20044
20045            // Decode unknown envelopes for gaps in ordinals.
20046            while _next_ordinal_to_read < 1 {
20047                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20048                _next_ordinal_to_read += 1;
20049                next_offset += envelope_size;
20050            }
20051
20052            let next_out_of_line = decoder.next_out_of_line();
20053            let handles_before = decoder.remaining_handles();
20054            if let Some((inlined, num_bytes, num_handles)) =
20055                fidl::encoding::decode_envelope_header(decoder, next_offset)?
20056            {
20057                let member_inline_size = <fidl::encoding::Vector<
20058                    fidl::encoding::Endpoint<
20059                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20060                    >,
20061                    64,
20062                > as fidl::encoding::TypeMarker>::inline_size(
20063                    decoder.context
20064                );
20065                if inlined != (member_inline_size <= 4) {
20066                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
20067                }
20068                let inner_offset;
20069                let mut inner_depth = depth.clone();
20070                if inlined {
20071                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
20072                    inner_offset = next_offset;
20073                } else {
20074                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
20075                    inner_depth.increment()?;
20076                }
20077                let val_ref = self.tokens.get_or_insert_with(|| {
20078                    fidl::new_empty!(
20079                        fidl::encoding::Vector<
20080                            fidl::encoding::Endpoint<
20081                                fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20082                            >,
20083                            64,
20084                        >,
20085                        fidl::encoding::DefaultFuchsiaResourceDialect
20086                    )
20087                });
20088                fidl::decode!(
20089                    fidl::encoding::Vector<
20090                        fidl::encoding::Endpoint<
20091                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20092                        >,
20093                        64,
20094                    >,
20095                    fidl::encoding::DefaultFuchsiaResourceDialect,
20096                    val_ref,
20097                    decoder,
20098                    inner_offset,
20099                    inner_depth
20100                )?;
20101                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
20102                {
20103                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
20104                }
20105                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
20106                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
20107                }
20108            }
20109
20110            next_offset += envelope_size;
20111
20112            // Decode the remaining unknown envelopes.
20113            while next_offset < end_offset {
20114                _next_ordinal_to_read += 1;
20115                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20116                next_offset += envelope_size;
20117            }
20118
20119            Ok(())
20120        }
20121    }
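
    // A hedged usage sketch for the response type above: `tokens` carries up to 64
    // `ClientEnd<BufferCollectionTokenMarker>` values, one per requested child. The proxy
    // method and request type names below follow the usual fidlgen naming conventions and
    // are assumptions, not taken from this file:
    //
    //     let request = BufferCollectionTokenGroupCreateChildrenSyncRequest {
    //         rights_attenuation_masks: Some(vec![fidl::Rights::SAME_RIGHTS; 2]),
    //         ..Default::default()
    //     };
    //     let response = group_proxy.create_children_sync(&request).await?;
    //     let child_tokens = response.tokens.unwrap_or_default();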
20122
20123    impl BufferCollectionTokenDuplicateSyncResponse {
20124        #[inline(always)]
20125        fn max_ordinal_present(&self) -> u64 {
20126            if let Some(_) = self.tokens {
20127                return 1;
20128            }
20129            0
20130        }
20131    }
20132
20133    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateSyncResponse {
20134        type Borrowed<'a> = &'a mut Self;
20135        fn take_or_borrow<'a>(
20136            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20137        ) -> Self::Borrowed<'a> {
20138            value
20139        }
20140    }
20141
20142    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateSyncResponse {
20143        type Owned = Self;
20144
20145        #[inline(always)]
20146        fn inline_align(_context: fidl::encoding::Context) -> usize {
20147            8
20148        }
20149
20150        #[inline(always)]
20151        fn inline_size(_context: fidl::encoding::Context) -> usize {
20152            16
20153        }
20154    }
20155
20156    unsafe impl
20157        fidl::encoding::Encode<
20158            BufferCollectionTokenDuplicateSyncResponse,
20159            fidl::encoding::DefaultFuchsiaResourceDialect,
20160        > for &mut BufferCollectionTokenDuplicateSyncResponse
20161    {
20162        unsafe fn encode(
20163            self,
20164            encoder: &mut fidl::encoding::Encoder<
20165                '_,
20166                fidl::encoding::DefaultFuchsiaResourceDialect,
20167            >,
20168            offset: usize,
20169            mut depth: fidl::encoding::Depth,
20170        ) -> fidl::Result<()> {
20171            encoder.debug_check_bounds::<BufferCollectionTokenDuplicateSyncResponse>(offset);
20172            // Vector header
20173            let max_ordinal: u64 = self.max_ordinal_present();
20174            encoder.write_num(max_ordinal, offset);
20175            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
20176            // Calling encoder.out_of_line_offset(0) is not allowed.
20177            if max_ordinal == 0 {
20178                return Ok(());
20179            }
20180            depth.increment()?;
20181            let envelope_size = 8;
20182            let bytes_len = max_ordinal as usize * envelope_size;
20183            #[allow(unused_variables)]
20184            let offset = encoder.out_of_line_offset(bytes_len);
20185            let mut _prev_end_offset: usize = 0;
20186            if 1 > max_ordinal {
20187                return Ok(());
20188            }
20189
20190            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
20191            // are envelope_size bytes.
20192            let cur_offset: usize = (1 - 1) * envelope_size;
20193
20194            // Zero reserved fields.
20195            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
20196
20197            // Safety:
20198            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
20199            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
20200            //   envelope_size bytes, there is always sufficient room.
20201            fidl::encoding::encode_in_envelope_optional::<
20202                fidl::encoding::Vector<
20203                    fidl::encoding::Endpoint<
20204                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20205                    >,
20206                    64,
20207                >,
20208                fidl::encoding::DefaultFuchsiaResourceDialect,
20209            >(
20210                self.tokens.as_mut().map(
20211                    <fidl::encoding::Vector<
20212                        fidl::encoding::Endpoint<
20213                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20214                        >,
20215                        64,
20216                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
20217                ),
20218                encoder,
20219                offset + cur_offset,
20220                depth,
20221            )?;
20222
20223            _prev_end_offset = cur_offset + envelope_size;
20224
20225            Ok(())
20226        }
20227    }
20228
20229    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
20230        for BufferCollectionTokenDuplicateSyncResponse
20231    {
20232        #[inline(always)]
20233        fn new_empty() -> Self {
20234            Self::default()
20235        }
20236
20237        unsafe fn decode(
20238            &mut self,
20239            decoder: &mut fidl::encoding::Decoder<
20240                '_,
20241                fidl::encoding::DefaultFuchsiaResourceDialect,
20242            >,
20243            offset: usize,
20244            mut depth: fidl::encoding::Depth,
20245        ) -> fidl::Result<()> {
20246            decoder.debug_check_bounds::<Self>(offset);
20247            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
20248                None => return Err(fidl::Error::NotNullable),
20249                Some(len) => len,
20250            };
20251            // Calling decoder.out_of_line_offset(0) is not allowed.
20252            if len == 0 {
20253                return Ok(());
20254            };
20255            depth.increment()?;
20256            let envelope_size = 8;
20257            let bytes_len = len * envelope_size;
20258            let offset = decoder.out_of_line_offset(bytes_len)?;
20259            // Decode the envelope for each field, in ordinal order.
20260            let mut _next_ordinal_to_read = 0;
20261            let mut next_offset = offset;
20262            let end_offset = offset + bytes_len;
20263            _next_ordinal_to_read += 1;
20264            if next_offset >= end_offset {
20265                return Ok(());
20266            }
20267
20268            // Decode unknown envelopes for gaps in ordinals.
20269            while _next_ordinal_to_read < 1 {
20270                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20271                _next_ordinal_to_read += 1;
20272                next_offset += envelope_size;
20273            }
20274
20275            let next_out_of_line = decoder.next_out_of_line();
20276            let handles_before = decoder.remaining_handles();
20277            if let Some((inlined, num_bytes, num_handles)) =
20278                fidl::encoding::decode_envelope_header(decoder, next_offset)?
20279            {
20280                let member_inline_size = <fidl::encoding::Vector<
20281                    fidl::encoding::Endpoint<
20282                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20283                    >,
20284                    64,
20285                > as fidl::encoding::TypeMarker>::inline_size(
20286                    decoder.context
20287                );
20288                if inlined != (member_inline_size <= 4) {
20289                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
20290                }
20291                let inner_offset;
20292                let mut inner_depth = depth.clone();
20293                if inlined {
20294                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
20295                    inner_offset = next_offset;
20296                } else {
20297                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
20298                    inner_depth.increment()?;
20299                }
20300                let val_ref = self.tokens.get_or_insert_with(|| {
20301                    fidl::new_empty!(
20302                        fidl::encoding::Vector<
20303                            fidl::encoding::Endpoint<
20304                                fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20305                            >,
20306                            64,
20307                        >,
20308                        fidl::encoding::DefaultFuchsiaResourceDialect
20309                    )
20310                });
20311                fidl::decode!(
20312                    fidl::encoding::Vector<
20313                        fidl::encoding::Endpoint<
20314                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
20315                        >,
20316                        64,
20317                    >,
20318                    fidl::encoding::DefaultFuchsiaResourceDialect,
20319                    val_ref,
20320                    decoder,
20321                    inner_offset,
20322                    inner_depth
20323                )?;
20324                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
20325                {
20326                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
20327                }
20328                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
20329                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
20330                }
20331            }
20332
20333            next_offset += envelope_size;
20334
20335            // Decode the remaining unknown envelopes.
20336            while next_offset < end_offset {
20337                _next_ordinal_to_read += 1;
20338                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20339                next_offset += envelope_size;
20340            }
20341
20342            Ok(())
20343        }
20344    }
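
    // A similar hedged sketch for `BufferCollectionToken.DuplicateSync`: each returned
    // `ClientEnd<BufferCollectionTokenMarker>` in `tokens` represents one additional
    // participant in the same logical buffer collection. Method and request type names are
    // assumed from fidlgen conventions:
    //
    //     let request = BufferCollectionTokenDuplicateSyncRequest {
    //         rights_attenuation_masks: Some(vec![fidl::Rights::SAME_RIGHTS]),
    //         ..Default::default()
    //     };
    //     let response = token_proxy.duplicate_sync(&request).await?;
    //     let duplicate_tokens = response.tokens.unwrap_or_default();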
20345
20346    impl BufferCollectionWaitForAllBuffersAllocatedResponse {
20347        #[inline(always)]
20348        fn max_ordinal_present(&self) -> u64 {
20349            if let Some(_) = self.buffer_collection_info {
20350                return 1;
20351            }
20352            0
20353        }
20354    }
20355
20356    impl fidl::encoding::ResourceTypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
20357        type Borrowed<'a> = &'a mut Self;
20358        fn take_or_borrow<'a>(
20359            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20360        ) -> Self::Borrowed<'a> {
20361            value
20362        }
20363    }
20364
20365    unsafe impl fidl::encoding::TypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
20366        type Owned = Self;
20367
20368        #[inline(always)]
20369        fn inline_align(_context: fidl::encoding::Context) -> usize {
20370            8
20371        }
20372
20373        #[inline(always)]
20374        fn inline_size(_context: fidl::encoding::Context) -> usize {
20375            16
20376        }
20377    }
20378
20379    unsafe impl
20380        fidl::encoding::Encode<
20381            BufferCollectionWaitForAllBuffersAllocatedResponse,
20382            fidl::encoding::DefaultFuchsiaResourceDialect,
20383        > for &mut BufferCollectionWaitForAllBuffersAllocatedResponse
20384    {
20385        unsafe fn encode(
20386            self,
20387            encoder: &mut fidl::encoding::Encoder<
20388                '_,
20389                fidl::encoding::DefaultFuchsiaResourceDialect,
20390            >,
20391            offset: usize,
20392            mut depth: fidl::encoding::Depth,
20393        ) -> fidl::Result<()> {
20394            encoder
20395                .debug_check_bounds::<BufferCollectionWaitForAllBuffersAllocatedResponse>(offset);
20396            // Vector header
20397            let max_ordinal: u64 = self.max_ordinal_present();
20398            encoder.write_num(max_ordinal, offset);
20399            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
20400            // Calling encoder.out_of_line_offset(0) is not allowed.
20401            if max_ordinal == 0 {
20402                return Ok(());
20403            }
20404            depth.increment()?;
20405            let envelope_size = 8;
20406            let bytes_len = max_ordinal as usize * envelope_size;
20407            #[allow(unused_variables)]
20408            let offset = encoder.out_of_line_offset(bytes_len);
20409            let mut _prev_end_offset: usize = 0;
20410            if 1 > max_ordinal {
20411                return Ok(());
20412            }
20413
20414            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
20415            // are envelope_size bytes.
20416            let cur_offset: usize = (1 - 1) * envelope_size;
20417
20418            // Zero reserved fields.
20419            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
20420
20421            // Safety:
20422            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
20423            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
20424            //   envelope_size bytes, there is always sufficient room.
20425            fidl::encoding::encode_in_envelope_optional::<
20426                BufferCollectionInfo,
20427                fidl::encoding::DefaultFuchsiaResourceDialect,
20428            >(
20429                self.buffer_collection_info.as_mut().map(
20430                    <BufferCollectionInfo as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
20431                ),
20432                encoder,
20433                offset + cur_offset,
20434                depth,
20435            )?;
20436
20437            _prev_end_offset = cur_offset + envelope_size;
20438
20439            Ok(())
20440        }
20441    }
20442
20443    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
20444        for BufferCollectionWaitForAllBuffersAllocatedResponse
20445    {
20446        #[inline(always)]
20447        fn new_empty() -> Self {
20448            Self::default()
20449        }
20450
20451        unsafe fn decode(
20452            &mut self,
20453            decoder: &mut fidl::encoding::Decoder<
20454                '_,
20455                fidl::encoding::DefaultFuchsiaResourceDialect,
20456            >,
20457            offset: usize,
20458            mut depth: fidl::encoding::Depth,
20459        ) -> fidl::Result<()> {
20460            decoder.debug_check_bounds::<Self>(offset);
20461            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
20462                None => return Err(fidl::Error::NotNullable),
20463                Some(len) => len,
20464            };
20465            // Calling decoder.out_of_line_offset(0) is not allowed.
20466            if len == 0 {
20467                return Ok(());
20468            };
20469            depth.increment()?;
20470            let envelope_size = 8;
20471            let bytes_len = len * envelope_size;
20472            let offset = decoder.out_of_line_offset(bytes_len)?;
20473            // Decode the envelope for each field, in ordinal order.
20474            let mut _next_ordinal_to_read = 0;
20475            let mut next_offset = offset;
20476            let end_offset = offset + bytes_len;
20477            _next_ordinal_to_read += 1;
20478            if next_offset >= end_offset {
20479                return Ok(());
20480            }
20481
20482            // Decode unknown envelopes for gaps in ordinals.
20483            while _next_ordinal_to_read < 1 {
20484                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20485                _next_ordinal_to_read += 1;
20486                next_offset += envelope_size;
20487            }
20488
20489            let next_out_of_line = decoder.next_out_of_line();
20490            let handles_before = decoder.remaining_handles();
20491            if let Some((inlined, num_bytes, num_handles)) =
20492                fidl::encoding::decode_envelope_header(decoder, next_offset)?
20493            {
20494                let member_inline_size =
20495                    <BufferCollectionInfo as fidl::encoding::TypeMarker>::inline_size(
20496                        decoder.context,
20497                    );
20498                if inlined != (member_inline_size <= 4) {
20499                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
20500                }
20501                let inner_offset;
20502                let mut inner_depth = depth.clone();
20503                if inlined {
20504                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
20505                    inner_offset = next_offset;
20506                } else {
20507                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
20508                    inner_depth.increment()?;
20509                }
20510                let val_ref = self.buffer_collection_info.get_or_insert_with(|| {
20511                    fidl::new_empty!(
20512                        BufferCollectionInfo,
20513                        fidl::encoding::DefaultFuchsiaResourceDialect
20514                    )
20515                });
20516                fidl::decode!(
20517                    BufferCollectionInfo,
20518                    fidl::encoding::DefaultFuchsiaResourceDialect,
20519                    val_ref,
20520                    decoder,
20521                    inner_offset,
20522                    inner_depth
20523                )?;
20524                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
20525                {
20526                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
20527                }
20528                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
20529                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
20530                }
20531            }
20532
20533            next_offset += envelope_size;
20534
20535            // Decode the remaining unknown envelopes.
20536            while next_offset < end_offset {
20537                _next_ordinal_to_read += 1;
20538                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20539                next_offset += envelope_size;
20540            }
20541
20542            Ok(())
20543        }
20544    }
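
    // Hedged sketch for waiting on allocation: `buffer_collection_info` is only populated on
    // success. The proxy method name and the exact error shape are assumptions based on
    // fidlgen conventions for `BufferCollection.WaitForAllBuffersAllocated`:
    //
    //     let response = collection_proxy
    //         .wait_for_all_buffers_allocated()
    //         .await?                                   // transport-level fidl::Error
    //         .expect("sysmem allocation failed");      // protocol-level error, if any
    //     if let Some(info) = response.buffer_collection_info {
    //         // `info` describes the allocated buffers (settings, VMOs, etc.).
    //     }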
20545
20546    impl NodeAttachNodeTrackingRequest {
20547        #[inline(always)]
20548        fn max_ordinal_present(&self) -> u64 {
20549            if let Some(_) = self.server_end {
20550                return 1;
20551            }
20552            0
20553        }
20554    }
20555
20556    impl fidl::encoding::ResourceTypeMarker for NodeAttachNodeTrackingRequest {
20557        type Borrowed<'a> = &'a mut Self;
20558        fn take_or_borrow<'a>(
20559            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20560        ) -> Self::Borrowed<'a> {
20561            value
20562        }
20563    }
20564
20565    unsafe impl fidl::encoding::TypeMarker for NodeAttachNodeTrackingRequest {
20566        type Owned = Self;
20567
20568        #[inline(always)]
20569        fn inline_align(_context: fidl::encoding::Context) -> usize {
20570            8
20571        }
20572
20573        #[inline(always)]
20574        fn inline_size(_context: fidl::encoding::Context) -> usize {
20575            16
20576        }
20577    }
20578
20579    unsafe impl
20580        fidl::encoding::Encode<
20581            NodeAttachNodeTrackingRequest,
20582            fidl::encoding::DefaultFuchsiaResourceDialect,
20583        > for &mut NodeAttachNodeTrackingRequest
20584    {
20585        unsafe fn encode(
20586            self,
20587            encoder: &mut fidl::encoding::Encoder<
20588                '_,
20589                fidl::encoding::DefaultFuchsiaResourceDialect,
20590            >,
20591            offset: usize,
20592            mut depth: fidl::encoding::Depth,
20593        ) -> fidl::Result<()> {
20594            encoder.debug_check_bounds::<NodeAttachNodeTrackingRequest>(offset);
20595            // Vector header
20596            let max_ordinal: u64 = self.max_ordinal_present();
20597            encoder.write_num(max_ordinal, offset);
20598            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
20599            // Calling encoder.out_of_line_offset(0) is not allowed.
20600            if max_ordinal == 0 {
20601                return Ok(());
20602            }
20603            depth.increment()?;
20604            let envelope_size = 8;
20605            let bytes_len = max_ordinal as usize * envelope_size;
20606            #[allow(unused_variables)]
20607            let offset = encoder.out_of_line_offset(bytes_len);
20608            let mut _prev_end_offset: usize = 0;
20609            if 1 > max_ordinal {
20610                return Ok(());
20611            }
20612
20613            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
20614            // are envelope_size bytes.
20615            let cur_offset: usize = (1 - 1) * envelope_size;
20616
20617            // Zero reserved fields.
20618            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
20619
20620            // Safety:
20621            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
20622            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
20623            //   envelope_size bytes, there is always sufficient room.
20624            fidl::encoding::encode_in_envelope_optional::<
20625                fidl::encoding::HandleType<
20626                    fidl::EventPair,
20627                    { fidl::ObjectType::EVENTPAIR.into_raw() },
20628                    2147483648,
20629                >,
20630                fidl::encoding::DefaultFuchsiaResourceDialect,
20631            >(
20632                self.server_end.as_mut().map(
20633                    <fidl::encoding::HandleType<
20634                        fidl::EventPair,
20635                        { fidl::ObjectType::EVENTPAIR.into_raw() },
20636                        2147483648,
20637                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
20638                ),
20639                encoder,
20640                offset + cur_offset,
20641                depth,
20642            )?;
20643
20644            _prev_end_offset = cur_offset + envelope_size;
20645
20646            Ok(())
20647        }
20648    }
20649
20650    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
20651        for NodeAttachNodeTrackingRequest
20652    {
20653        #[inline(always)]
20654        fn new_empty() -> Self {
20655            Self::default()
20656        }
20657
20658        unsafe fn decode(
20659            &mut self,
20660            decoder: &mut fidl::encoding::Decoder<
20661                '_,
20662                fidl::encoding::DefaultFuchsiaResourceDialect,
20663            >,
20664            offset: usize,
20665            mut depth: fidl::encoding::Depth,
20666        ) -> fidl::Result<()> {
20667            decoder.debug_check_bounds::<Self>(offset);
20668            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
20669                None => return Err(fidl::Error::NotNullable),
20670                Some(len) => len,
20671            };
20672            // Calling decoder.out_of_line_offset(0) is not allowed.
20673            if len == 0 {
20674                return Ok(());
20675            };
20676            depth.increment()?;
20677            let envelope_size = 8;
20678            let bytes_len = len * envelope_size;
20679            let offset = decoder.out_of_line_offset(bytes_len)?;
20680            // Decode the envelope for each field, in ordinal order.
20681            let mut _next_ordinal_to_read = 0;
20682            let mut next_offset = offset;
20683            let end_offset = offset + bytes_len;
20684            _next_ordinal_to_read += 1;
20685            if next_offset >= end_offset {
20686                return Ok(());
20687            }
20688
20689            // Decode unknown envelopes for gaps in ordinals.
20690            while _next_ordinal_to_read < 1 {
20691                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20692                _next_ordinal_to_read += 1;
20693                next_offset += envelope_size;
20694            }
20695
20696            let next_out_of_line = decoder.next_out_of_line();
20697            let handles_before = decoder.remaining_handles();
20698            if let Some((inlined, num_bytes, num_handles)) =
20699                fidl::encoding::decode_envelope_header(decoder, next_offset)?
20700            {
20701                let member_inline_size = <fidl::encoding::HandleType<
20702                    fidl::EventPair,
20703                    { fidl::ObjectType::EVENTPAIR.into_raw() },
20704                    2147483648,
20705                > as fidl::encoding::TypeMarker>::inline_size(
20706                    decoder.context
20707                );
20708                if inlined != (member_inline_size <= 4) {
20709                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
20710                }
20711                let inner_offset;
20712                let mut inner_depth = depth.clone();
20713                if inlined {
20714                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
20715                    inner_offset = next_offset;
20716                } else {
20717                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
20718                    inner_depth.increment()?;
20719                }
20720                let val_ref =
20721                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
20722                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
20723                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
20724                {
20725                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
20726                }
20727                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
20728                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
20729                }
20730            }
20731
20732            next_offset += envelope_size;
20733
20734            // Decode the remaining unknown envelopes.
20735            while next_offset < end_offset {
20736                _next_ordinal_to_read += 1;
20737                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20738                next_offset += envelope_size;
20739            }
20740
20741            Ok(())
20742        }
20743    }
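
    // Hedged sketch for `Node.AttachNodeTracking`: the client keeps one end of an eventpair
    // and hands the other end to sysmem via `server_end`, so the retained end's peer-closed
    // signal can be used to track the node server-side (the exact semantics are documented on
    // the protocol, not in this file). Method name and eventpair creation are assumed:
    //
    //     let (tracking_client, tracking_server) = fidl::EventPair::create();
    //     node_proxy.attach_node_tracking(NodeAttachNodeTrackingRequest {
    //         server_end: Some(tracking_server),
    //         ..Default::default()
    //     })?;
    //     // Wait on `tracking_client` for PEER_CLOSED to observe the tracked lifetime event.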
20744
20745    impl NodeIsAlternateForRequest {
20746        #[inline(always)]
20747        fn max_ordinal_present(&self) -> u64 {
20748            if let Some(_) = self.node_ref {
20749                return 1;
20750            }
20751            0
20752        }
20753    }
20754
20755    impl fidl::encoding::ResourceTypeMarker for NodeIsAlternateForRequest {
20756        type Borrowed<'a> = &'a mut Self;
20757        fn take_or_borrow<'a>(
20758            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20759        ) -> Self::Borrowed<'a> {
20760            value
20761        }
20762    }
20763
20764    unsafe impl fidl::encoding::TypeMarker for NodeIsAlternateForRequest {
20765        type Owned = Self;
20766
20767        #[inline(always)]
20768        fn inline_align(_context: fidl::encoding::Context) -> usize {
20769            8
20770        }
20771
20772        #[inline(always)]
20773        fn inline_size(_context: fidl::encoding::Context) -> usize {
20774            16
20775        }
20776    }
20777
20778    unsafe impl
20779        fidl::encoding::Encode<
20780            NodeIsAlternateForRequest,
20781            fidl::encoding::DefaultFuchsiaResourceDialect,
20782        > for &mut NodeIsAlternateForRequest
20783    {
20784        unsafe fn encode(
20785            self,
20786            encoder: &mut fidl::encoding::Encoder<
20787                '_,
20788                fidl::encoding::DefaultFuchsiaResourceDialect,
20789            >,
20790            offset: usize,
20791            mut depth: fidl::encoding::Depth,
20792        ) -> fidl::Result<()> {
20793            encoder.debug_check_bounds::<NodeIsAlternateForRequest>(offset);
20794            // Vector header
20795            let max_ordinal: u64 = self.max_ordinal_present();
20796            encoder.write_num(max_ordinal, offset);
20797            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
20798            // Calling encoder.out_of_line_offset(0) is not allowed.
20799            if max_ordinal == 0 {
20800                return Ok(());
20801            }
20802            depth.increment()?;
20803            let envelope_size = 8;
20804            let bytes_len = max_ordinal as usize * envelope_size;
20805            #[allow(unused_variables)]
20806            let offset = encoder.out_of_line_offset(bytes_len);
20807            let mut _prev_end_offset: usize = 0;
20808            if 1 > max_ordinal {
20809                return Ok(());
20810            }
20811
20812            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
20813            // are envelope_size bytes.
20814            let cur_offset: usize = (1 - 1) * envelope_size;
20815
20816            // Zero reserved fields.
20817            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
20818
20819            // Safety:
20820            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
20821            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
20822            //   envelope_size bytes, there is always sufficient room.
20823            fidl::encoding::encode_in_envelope_optional::<
20824                fidl::encoding::HandleType<
20825                    fidl::Event,
20826                    { fidl::ObjectType::EVENT.into_raw() },
20827                    2147483648,
20828                >,
20829                fidl::encoding::DefaultFuchsiaResourceDialect,
20830            >(
20831                self.node_ref.as_mut().map(
20832                    <fidl::encoding::HandleType<
20833                        fidl::Event,
20834                        { fidl::ObjectType::EVENT.into_raw() },
20835                        2147483648,
20836                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
20837                ),
20838                encoder,
20839                offset + cur_offset,
20840                depth,
20841            )?;
20842
20843            _prev_end_offset = cur_offset + envelope_size;
20844
20845            Ok(())
20846        }
20847    }
20848
20849    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
20850        for NodeIsAlternateForRequest
20851    {
20852        #[inline(always)]
20853        fn new_empty() -> Self {
20854            Self::default()
20855        }
20856
20857        unsafe fn decode(
20858            &mut self,
20859            decoder: &mut fidl::encoding::Decoder<
20860                '_,
20861                fidl::encoding::DefaultFuchsiaResourceDialect,
20862            >,
20863            offset: usize,
20864            mut depth: fidl::encoding::Depth,
20865        ) -> fidl::Result<()> {
20866            decoder.debug_check_bounds::<Self>(offset);
20867            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
20868                None => return Err(fidl::Error::NotNullable),
20869                Some(len) => len,
20870            };
20871            // Calling decoder.out_of_line_offset(0) is not allowed.
20872            if len == 0 {
20873                return Ok(());
20874            };
20875            depth.increment()?;
20876            let envelope_size = 8;
20877            let bytes_len = len * envelope_size;
20878            let offset = decoder.out_of_line_offset(bytes_len)?;
20879            // Decode the envelope for each field, in ordinal order.
20880            let mut _next_ordinal_to_read = 0;
20881            let mut next_offset = offset;
20882            let end_offset = offset + bytes_len;
20883            _next_ordinal_to_read += 1;
20884            if next_offset >= end_offset {
20885                return Ok(());
20886            }
20887
20888            // Decode unknown envelopes for gaps in ordinals.
20889            while _next_ordinal_to_read < 1 {
20890                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20891                _next_ordinal_to_read += 1;
20892                next_offset += envelope_size;
20893            }
20894
20895            let next_out_of_line = decoder.next_out_of_line();
20896            let handles_before = decoder.remaining_handles();
20897            if let Some((inlined, num_bytes, num_handles)) =
20898                fidl::encoding::decode_envelope_header(decoder, next_offset)?
20899            {
20900                let member_inline_size = <fidl::encoding::HandleType<
20901                    fidl::Event,
20902                    { fidl::ObjectType::EVENT.into_raw() },
20903                    2147483648,
20904                > as fidl::encoding::TypeMarker>::inline_size(
20905                    decoder.context
20906                );
20907                if inlined != (member_inline_size <= 4) {
20908                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
20909                }
20910                let inner_offset;
20911                let mut inner_depth = depth.clone();
20912                if inlined {
20913                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
20914                    inner_offset = next_offset;
20915                } else {
20916                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
20917                    inner_depth.increment()?;
20918                }
20919                let val_ref =
20920                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
20921                fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
20922                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
20923                {
20924                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
20925                }
20926                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
20927                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
20928                }
20929            }
20930
20931            next_offset += envelope_size;
20932
20933            // Decode the remaining unknown envelopes.
20934            while next_offset < end_offset {
20935                _next_ordinal_to_read += 1;
20936                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
20937                next_offset += envelope_size;
20938            }
20939
20940            Ok(())
20941        }
20942    }
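
    // Hedged sketch for `Node.IsAlternateFor`: `node_ref` is an event obtained from another
    // node (typically via GetNodeRef), and the reply indicates whether the two nodes are
    // alternates under a token group. Method and field names are assumed from fidlgen naming:
    //
    //     let node_ref = node_a.get_node_ref().await?.node_ref.expect("node_ref");
    //     let response = node_b
    //         .is_alternate_for(NodeIsAlternateForRequest {
    //             node_ref: Some(node_ref),
    //             ..Default::default()
    //         })
    //         .await?
    //         .expect("IsAlternateFor returned an error");
    //     let is_alternate = response.is_alternate.unwrap_or(false);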
20943
20944    impl NodeSetWeakOkRequest {
20945        #[inline(always)]
20946        fn max_ordinal_present(&self) -> u64 {
20947            if let Some(_) = self.for_child_nodes_also {
20948                return 1;
20949            }
20950            0
20951        }
20952    }
20953
20954    impl fidl::encoding::ResourceTypeMarker for NodeSetWeakOkRequest {
20955        type Borrowed<'a> = &'a mut Self;
20956        fn take_or_borrow<'a>(
20957            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20958        ) -> Self::Borrowed<'a> {
20959            value
20960        }
20961    }
20962
20963    unsafe impl fidl::encoding::TypeMarker for NodeSetWeakOkRequest {
20964        type Owned = Self;
20965
20966        #[inline(always)]
20967        fn inline_align(_context: fidl::encoding::Context) -> usize {
20968            8
20969        }
20970
20971        #[inline(always)]
20972        fn inline_size(_context: fidl::encoding::Context) -> usize {
20973            16
20974        }
20975    }
20976
20977    unsafe impl
20978        fidl::encoding::Encode<NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect>
20979        for &mut NodeSetWeakOkRequest
20980    {
20981        unsafe fn encode(
20982            self,
20983            encoder: &mut fidl::encoding::Encoder<
20984                '_,
20985                fidl::encoding::DefaultFuchsiaResourceDialect,
20986            >,
20987            offset: usize,
20988            mut depth: fidl::encoding::Depth,
20989        ) -> fidl::Result<()> {
20990            encoder.debug_check_bounds::<NodeSetWeakOkRequest>(offset);
20991            // Vector header
20992            let max_ordinal: u64 = self.max_ordinal_present();
20993            encoder.write_num(max_ordinal, offset);
20994            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
20995            // Calling encoder.out_of_line_offset(0) is not allowed.
20996            if max_ordinal == 0 {
20997                return Ok(());
20998            }
20999            depth.increment()?;
21000            let envelope_size = 8;
21001            let bytes_len = max_ordinal as usize * envelope_size;
21002            #[allow(unused_variables)]
21003            let offset = encoder.out_of_line_offset(bytes_len);
21004            let mut _prev_end_offset: usize = 0;
21005            if 1 > max_ordinal {
21006                return Ok(());
21007            }
21008
21009            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
21010            // are envelope_size bytes.
21011            let cur_offset: usize = (1 - 1) * envelope_size;
21012
21013            // Zero reserved fields.
21014            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);
21015
21016            // Safety:
21017            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
21018            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
21019            //   envelope_size bytes, there is always sufficient room.
21020            fidl::encoding::encode_in_envelope_optional::<
21021                bool,
21022                fidl::encoding::DefaultFuchsiaResourceDialect,
21023            >(
21024                self.for_child_nodes_also
21025                    .as_ref()
21026                    .map(<bool as fidl::encoding::ValueTypeMarker>::borrow),
21027                encoder,
21028                offset + cur_offset,
21029                depth,
21030            )?;
21031
21032            _prev_end_offset = cur_offset + envelope_size;
21033
21034            Ok(())
21035        }
21036    }
21037
21038    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
21039        for NodeSetWeakOkRequest
21040    {
21041        #[inline(always)]
21042        fn new_empty() -> Self {
21043            Self::default()
21044        }
21045
21046        unsafe fn decode(
21047            &mut self,
21048            decoder: &mut fidl::encoding::Decoder<
21049                '_,
21050                fidl::encoding::DefaultFuchsiaResourceDialect,
21051            >,
21052            offset: usize,
21053            mut depth: fidl::encoding::Depth,
21054        ) -> fidl::Result<()> {
21055            decoder.debug_check_bounds::<Self>(offset);
21056            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
21057                None => return Err(fidl::Error::NotNullable),
21058                Some(len) => len,
21059            };
21060            // Calling decoder.out_of_line_offset(0) is not allowed.
21061            if len == 0 {
21062                return Ok(());
21063            };
21064            depth.increment()?;
21065            let envelope_size = 8;
21066            let bytes_len = len * envelope_size;
21067            let offset = decoder.out_of_line_offset(bytes_len)?;
21068            // Decode the envelope for each field, in ordinal order.
21069            let mut _next_ordinal_to_read = 0;
21070            let mut next_offset = offset;
21071            let end_offset = offset + bytes_len;
21072            _next_ordinal_to_read += 1;
21073            if next_offset >= end_offset {
21074                return Ok(());
21075            }
21076
21077            // Decode unknown envelopes for gaps in ordinals.
21078            while _next_ordinal_to_read < 1 {
21079                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
21080                _next_ordinal_to_read += 1;
21081                next_offset += envelope_size;
21082            }
21083
21084            let next_out_of_line = decoder.next_out_of_line();
21085            let handles_before = decoder.remaining_handles();
21086            if let Some((inlined, num_bytes, num_handles)) =
21087                fidl::encoding::decode_envelope_header(decoder, next_offset)?
21088            {
21089                let member_inline_size =
21090                    <bool as fidl::encoding::TypeMarker>::inline_size(decoder.context);
21091                if inlined != (member_inline_size <= 4) {
21092                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
21093                }
21094                let inner_offset;
21095                let mut inner_depth = depth.clone();
21096                if inlined {
21097                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
21098                    inner_offset = next_offset;
21099                } else {
21100                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
21101                    inner_depth.increment()?;
21102                }
21103                let val_ref = self.for_child_nodes_also.get_or_insert_with(|| {
21104                    fidl::new_empty!(bool, fidl::encoding::DefaultFuchsiaResourceDialect)
21105                });
21106                fidl::decode!(
21107                    bool,
21108                    fidl::encoding::DefaultFuchsiaResourceDialect,
21109                    val_ref,
21110                    decoder,
21111                    inner_offset,
21112                    inner_depth
21113                )?;
21114                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
21115                {
21116                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
21117                }
21118                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
21119                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
21120                }
21121            }
21122
21123            next_offset += envelope_size;
21124
21125            // Decode the remaining unknown envelopes.
21126            while next_offset < end_offset {
21127                _next_ordinal_to_read += 1;
21128                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
21129                next_offset += envelope_size;
21130            }
21131
21132            Ok(())
21133        }
21134    }
21135
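    // Note on the pattern above (and repeated for every table below): a FIDL table
    // decodes as a 16-byte header (a u64 count of envelopes plus a u64 presence
    // marker), followed out of line by one 8-byte envelope per ordinal. A member
    // whose inline size is at most 4 bytes is stored directly in its envelope
    // (the `member_inline_size <= 4` check); anything larger lives in a further
    // out-of-line block, and the byte/handle counts recorded in the envelope
    // header are re-verified after decoding. Unknown ordinals are skipped via
    // `decode_unknown_envelope`, which is what keeps these tables evolvable.
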
    impl NodeGetNodeRefResponse {
        #[inline(always)]
        fn max_ordinal_present(&self) -> u64 {
            if let Some(_) = self.node_ref {
                return 1;
            }
            0
        }
    }

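    // `max_ordinal_present` (above) returns the highest ordinal whose field is
    // set. The encoder below sizes the envelope array to exactly that many
    // entries, so trailing unset members simply never appear on the wire.
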
    impl fidl::encoding::ResourceTypeMarker for NodeGetNodeRefResponse {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }

    unsafe impl fidl::encoding::TypeMarker for NodeGetNodeRefResponse {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }

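    // The 16-byte inline size is just the table header (envelope count plus
    // presence word); member payloads are always written out of line by the
    // encoder that follows.
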
    unsafe impl
        fidl::encoding::Encode<
            NodeGetNodeRefResponse,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut NodeGetNodeRefResponse
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<NodeGetNodeRefResponse>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.node_ref.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Event,
                        { fidl::ObjectType::EVENT.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }

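    // Concretely: a `NodeGetNodeRefResponse` with `node_ref` set encodes as the
    // 16-byte header (max_ordinal = 1, ALLOC_PRESENT_U64) plus a single 8-byte
    // envelope. The event's 4-byte handle representation takes the inline path,
    // and the handle itself is moved out of the struct (note `as_mut` +
    // `take_or_borrow`) into the message's handle table.
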
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for NodeGetNodeRefResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }

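    // A minimal construction sketch (illustrative only; `event` stands in for an
    // existing `fidl::Event` handle):
    //
    //     let response = NodeGetNodeRefResponse {
    //         node_ref: Some(event),
    //         ..Default::default()
    //     };
    //
    // The `..Default::default()` spread is needed because, like the other
    // generated tables in this crate, the struct carries a hidden
    // `__source_breaking` marker field.
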
    impl VmoBuffer {
        #[inline(always)]
        fn max_ordinal_present(&self) -> u64 {
            if let Some(_) = self.close_weak_asap {
                return 3;
            }
            if let Some(_) = self.vmo_usable_start {
                return 2;
            }
            if let Some(_) = self.vmo {
                return 1;
            }
            0
        }
    }

    impl fidl::encoding::ResourceTypeMarker for VmoBuffer {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }

    unsafe impl fidl::encoding::TypeMarker for VmoBuffer {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }

    unsafe impl fidl::encoding::Encode<VmoBuffer, fidl::encoding::DefaultFuchsiaResourceDialect>
        for &mut VmoBuffer
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<VmoBuffer>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.vmo.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Vmo,
                        { fidl::ObjectType::VMO.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.vmo_usable_start
                    .as_ref()
                    .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 3 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (3 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.close_weak_asap.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::EventPair,
                        { fidl::ObjectType::EVENTPAIR.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }

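    // For a fully populated `VmoBuffer`, the encoder above therefore emits three
    // envelopes after the table header: the `vmo` handle (inline, one handle),
    // `vmo_usable_start` as a u64 (8 bytes, so out of line), and the
    // `close_weak_asap` event pair (inline, one handle).
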
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.vmo_usable_start.get_or_insert_with(|| {
                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u64,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 3 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
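
    // A minimal construction sketch for `VmoBuffer` (illustrative only; `vmo`
    // stands in for an existing `fidl::Vmo` handle):
    //
    //     let buffer = VmoBuffer {
    //         vmo: Some(vmo),
    //         vmo_usable_start: Some(0),
    //         ..Default::default()
    //     };
    //
    // Leaving `close_weak_asap` unset keeps `max_ordinal_present` at 2, so only
    // the first two envelopes are written when this table is encoded.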
}