fidl_fuchsia_sysmem2/
fidl_fuchsia_sysmem2.rs

1// WARNING: This file is machine generated by fidlgen.
2
3#![warn(clippy::all)]
4#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]
5
6use bitflags::bitflags;
7use fidl::client::QueryResponseFut;
8use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
9use fidl::endpoints::{ControlHandle as _, Responder as _};
10pub use fidl_fuchsia_sysmem2__common::*;
11use futures::future::{self, MaybeDone, TryFutureExt};
12use zx_status;
13
/// Request table for `Allocator.AllocateNonSharedCollection`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorAllocateNonSharedCollectionRequest {
    /// Server end of the `BufferCollection` channel to be served by sysmem.
    pub collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect (it contains handles, so it is a resource type).
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateNonSharedCollectionRequest
{
}
25
/// Request table for `Allocator.AllocateSharedCollection`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorAllocateSharedCollectionRequest {
    /// Server end of the root `BufferCollectionToken` channel to be served by
    /// sysmem.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateSharedCollectionRequest
{
}
37
/// Request table for `Allocator.BindSharedCollection`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorBindSharedCollectionRequest {
    /// Client end of a `BufferCollectionToken` being "turned in" in exchange
    /// for a `BufferCollection`.
    pub token: Option<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
    /// Server end of the `BufferCollection` channel to be served by sysmem.
    pub buffer_collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorBindSharedCollectionRequest
{
}
50
/// Request table for `Allocator.GetVmoInfo`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorGetVmoInfoRequest {
    /// `vmo` is required to be set; ownership is transferred to the server
    /// so in most cases a client will duplicate a handle and transfer the
    /// duplicate via this field.
    pub vmo: Option<fidl::Vmo>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoRequest
{
}
65
/// Success response table for `Allocator.GetVmoInfo`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorGetVmoInfoResponse {
    /// ID of the logical buffer collection the VMO belongs to; unique per boot.
    pub buffer_collection_id: Option<u64>,
    /// Index of the buffer within its collection.
    pub buffer_index: Option<u64>,
    /// Set iff the queried VMO is a weak sysmem VMO; signals
    /// `ZX_EVENTPAIR_PEER_CLOSED` when weak handles should be closed (see the
    /// `GetVmoInfo` method documentation).
    pub close_weak_asap: Option<fidl::EventPair>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoResponse
{
}
79
/// Request table for `BufferCollection.AttachLifetimeTracking`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionAttachLifetimeTrackingRequest {
    /// Server end of an eventpair used to track buffer lifetime.
    pub server_end: Option<fidl::EventPair>,
    // NOTE(review): presumably the buffer count at/below which the eventpair
    // peer is closed — confirm against the fuchsia.sysmem2 protocol docs.
    pub buffers_remaining: Option<u32>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachLifetimeTrackingRequest
{
}
92
/// Request table for `BufferCollection.AttachToken`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionAttachTokenRequest {
    // NOTE(review): presumably rights to attenuate on VMOs obtained via the
    // attached token — confirm against the fuchsia.sysmem2 protocol docs.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    /// Server end of the `BufferCollectionToken` channel to attach.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachTokenRequest
{
}
105
/// Information about a buffer collection and its buffers.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionInfo {
    /// These settings apply to all the buffers in the initial buffer
    /// allocation.
    ///
    /// This field will always be set by sysmem.
    pub settings: Option<SingleBufferSettings>,
    /// VMO handles (and vmo_usable_start offset) for each buffer in the
    /// collection.
    ///
    /// The size of this vector is the buffer_count (buffer_count is not sent
    /// separately).
    ///
    /// All buffer VMO handles have identical size and access rights.  The size
    /// is in settings.buffer_settings.size_bytes.
    ///
    /// The VMO access rights are determined based on the usages which the
    /// client specified when allocating the buffer collection.  For example, a
    /// client which expressed a read-only usage will receive VMOs without write
    /// rights.  In addition, the rights can be attenuated by the parameter to
    /// BufferCollectionToken.Duplicate() calls.
    ///
    /// This field will always have VmoBuffer(s) in it, even if the participant
    /// specifies usage which does not require VMO handles.  This permits such a
    /// participant to know the vmo_usable_start values, in case that's of any
    /// use to the participant.
    ///
    /// This field will always be set by sysmem, even if the participant doesn't
    /// specify any buffer usage (but the [`fuchsia.sysmem2/VmoBuffer.vmo`]
    /// sub-field within this field won't be set in that case).
    pub buffers: Option<Vec<VmoBuffer>>,
    /// This number is unique among all logical buffer collections per boot.
    ///
    /// This ID number will be the same for all BufferCollectionToken(s),
    /// BufferCollection(s), and BufferCollectionTokenGroup(s) associated with
    /// the same logical buffer collection (derived from the same root token
    /// created with fuchsia.sysmem2.Allocator.CreateSharedCollection, or with
    /// CreateNonSharedCollection).
    ///
    /// The same ID can be retrieved from a BufferCollectionToken,
    /// BufferCollection, or BufferCollectionTokenGroup using
    /// GetBufferCollectionId (at the cost of a round-trip to sysmem and back).
    ///
    /// This field will always be set by sysmem.
    pub buffer_collection_id: Option<u64>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for BufferCollectionInfo {}
157
/// Request table for `BufferCollection.SetConstraints`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionSetConstraintsRequest {
    /// The participant's constraints on the buffer collection.
    pub constraints: Option<BufferCollectionConstraints>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionSetConstraintsRequest
{
}
169
/// Request table for `BufferCollectionToken.CreateBufferCollectionTokenGroup`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
    /// Server end of the `BufferCollectionTokenGroup` channel to be served by
    /// sysmem.
    pub group_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
}
181
/// Request table for `BufferCollectionToken.Duplicate`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenDuplicateRequest {
    // NOTE(review): presumably rights to attenuate on VMOs obtained via the
    // duplicated token — confirm against the fuchsia.sysmem2 protocol docs.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    /// Server end of the duplicated `BufferCollectionToken` channel.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateRequest
{
}
194
/// Request table for `BufferCollectionTokenGroup.CreateChild`.
///
/// Generated FIDL resource table; all fields are optional at the wire level
/// (see per-field notes for protocol-level requirements).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenGroupCreateChildRequest {
    /// Must be set.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    /// If not set, the default is `ZX_RIGHT_SAME_RIGHTS`.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildRequest
{
}
209
/// Response table for `BufferCollectionTokenGroup.CreateChildrenSync`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenGroupCreateChildrenSyncResponse {
    /// Client ends of the newly created child tokens, one per requested child.
    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildrenSyncResponse
{
}
221
/// Response table for `BufferCollectionToken.DuplicateSync`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenDuplicateSyncResponse {
    /// Client ends of the duplicated tokens.
    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateSyncResponse
{
}
233
/// Success response table for `BufferCollection.WaitForAllBuffersAllocated`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionWaitForAllBuffersAllocatedResponse {
    /// The allocated buffers and their shared settings.
    pub buffer_collection_info: Option<BufferCollectionInfo>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionWaitForAllBuffersAllocatedResponse
{
}
245
/// Request table for `Node.AttachNodeTracking`.
///
/// Generated FIDL resource table; all fields are optional at the wire level
/// (see per-field notes for protocol-level requirements).
#[derive(Debug, Default, PartialEq)]
pub struct NodeAttachNodeTrackingRequest {
    /// This field must be set. This eventpair end will be closed after the
    /// `Node` is closed or failed and the node's buffer counts are no
    /// longer in effect in the logical buffer collection.
    pub server_end: Option<fidl::EventPair>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeAttachNodeTrackingRequest
{
}
260
/// Request table for `Node.IsAlternateFor`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct NodeIsAlternateForRequest {
    /// A node reference event, as obtained from `Node.GetNodeRef`.
    pub node_ref: Option<fidl::Event>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeIsAlternateForRequest {}
269
/// Request table for `Node.SetWeakOk`.
///
/// Generated FIDL table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct NodeSetWeakOkRequest {
    /// Whether the weak-ok setting also applies to child nodes.
    pub for_child_nodes_also: Option<bool>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeSetWeakOkRequest {}
278
/// Response table for `Node.GetNodeRef`.
///
/// Generated FIDL resource table; all fields are optional at the wire level.
#[derive(Debug, Default, PartialEq)]
pub struct NodeGetNodeRefResponse {
    /// Event handle identifying this `Node`; usable with `Node.IsAlternateFor`.
    pub node_ref: Option<fidl::Event>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeGetNodeRefResponse {}
287
/// A single buffer (VMO plus metadata) within a buffer collection, as carried
/// in `BufferCollectionInfo.buffers`.
#[derive(Debug, Default, PartialEq)]
pub struct VmoBuffer {
    /// `vmo` can be un-set if a participant has only
    /// [`fuchsia.sysmem2/BufferUsage.none`] set to `NONE_USAGE` (explicitly or
    /// implicitly by [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
    /// without `constraints` set).
    pub vmo: Option<fidl::Vmo>,
    /// Offset within the VMO of the first usable byte. Must be < the VMO's size
    /// in bytes, and leave sufficient room for BufferMemorySettings.size_bytes
    /// before the end of the VMO.
    ///
    /// Currently sysmem will always set this field to 0, and in future, sysmem
    /// won't set this field to a non-zero value unless all participants have
    /// explicitly indicated support for non-zero vmo_usable_start (this
    /// mechanism does not exist as of this comment). A participant that hasn't
    /// explicitly indicated support for non-zero vmo_usable_start (all current
    /// clients) should implicitly assume this field is set to 0 without
    /// actually checking this field.
    pub vmo_usable_start: Option<u64>,
    /// This field is set iff `vmo` is a sysmem weak VMO handle. The client must
    /// keep `close_weak_asap` around for as long as `vmo`, and must notice
    /// `ZX_EVENTPAIR_PEER_CLOSED`. If that signal occurs, the client must close
    /// `vmo` asap. Not doing so is considered a VMO leak by the client and in
    /// that case sysmem will eventually complain loudly via syslog (currently
    /// 5s later).
    pub close_weak_asap: Option<fidl::EventPair>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marks the table as standalone-encodable in the default Fuchsia resource
// dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {}
319
/// Marker type identifying the `fuchsia.sysmem2/Allocator` protocol.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct AllocatorMarker;

impl fidl::endpoints::ProtocolMarker for AllocatorMarker {
    type Proxy = AllocatorProxy;
    type RequestStream = AllocatorRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = AllocatorSynchronousProxy;

    // Name used for service discovery and in error/log messages.
    const DEBUG_NAME: &'static str = "fuchsia.sysmem2.Allocator";
}
// Discoverable: the protocol can be connected to by its `DEBUG_NAME`.
impl fidl::endpoints::DiscoverableProtocolMarker for AllocatorMarker {}
/// Result type of `Allocator.GetVmoInfo`.
pub type AllocatorGetVmoInfoResult = Result<AllocatorGetVmoInfoResponse, Error>;
333
/// Client-side interface for the `Allocator` protocol, implemented by
/// [`AllocatorProxy`]. One-way methods return `Result<(), fidl::Error>`
/// immediately; two-way methods return a future resolving to the response.
pub trait AllocatorProxyInterface: Send + Sync {
    fn r#allocate_non_shared_collection(
        &self,
        payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    fn r#allocate_shared_collection(
        &self,
        payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    fn r#bind_shared_collection(
        &self,
        payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    type ValidateBufferCollectionTokenResponseFut: std::future::Future<
            Output = Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error>,
        > + Send;
    fn r#validate_buffer_collection_token(
        &self,
        payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> Self::ValidateBufferCollectionTokenResponseFut;
    fn r#set_debug_client_info(
        &self,
        payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    type GetVmoInfoResponseFut: std::future::Future<Output = Result<AllocatorGetVmoInfoResult, fidl::Error>>
        + Send;
    fn r#get_vmo_info(&self, payload: AllocatorGetVmoInfoRequest) -> Self::GetVmoInfoResponseFut;
}
/// Synchronous (blocking) client for the `Allocator` protocol.
///
/// Only available on Fuchsia targets; host code uses [`AllocatorProxy`].
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct AllocatorSynchronousProxy {
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for AllocatorSynchronousProxy {
    type Proxy = AllocatorProxy;
    type Protocol = AllocatorMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
385
#[cfg(target_os = "fuchsia")]
impl AllocatorSynchronousProxy {
    /// Creates a synchronous proxy over `channel`, tagging the underlying
    /// client with the protocol's debug name for diagnostics.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorEvent, fidl::Error> {
        AllocatorEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes.  This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    pub fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
            &mut payload,
            0x5ca681f025a80e44, // method ordinal
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2.BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    pub fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateSharedCollectionRequest>(
            &mut payload,
            0x11a19ff51f0b49c1, // method ordinal
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`].  The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel.  The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    pub fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorBindSharedCollectionRequest>(
            &mut payload,
            0x550916b0dc1d5b4e, // method ordinal
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel.  This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    pub fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
        // Two-way call: blocks until the response arrives or `___deadline`
        // elapses. FlexibleType unwraps the flexible-method result envelope.
        let _response = self.client.send_query::<
            AllocatorValidateBufferCollectionTokenRequest,
            fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
        >(
            payload,
            0x4c5ee91b02a7e68d, // method ordinal
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
        Ok(_response)
    }

    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorSetDebugClientInfoRequest>(
            payload,
            0x6f68f19a3f509c4d, // method ordinal
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    pub fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
        // Two-way call with a domain-error result: FlexibleResultType decodes
        // into Result<AllocatorGetVmoInfoResponse, Error>.
        let _response = self.client.send_query::<
            AllocatorGetVmoInfoRequest,
            fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
        >(
            &mut payload,
            0x21a881120aa0ddf9, // method ordinal
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<AllocatorMarker>("get_vmo_info")?;
        Ok(_response.map(|x| x))
    }
}
671
// Consuming the proxy yields the underlying channel as a generic handle.
#[cfg(target_os = "fuchsia")]
impl From<AllocatorSynchronousProxy> for zx::Handle {
    fn from(value: AllocatorSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}

// A raw channel can be wrapped directly into a synchronous proxy.
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for AllocatorSynchronousProxy {
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}

// A typed client end can be converted into a synchronous proxy.
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for AllocatorSynchronousProxy {
    type Protocol = AllocatorMarker;

    fn from_client(value: fidl::endpoints::ClientEnd<AllocatorMarker>) -> Self {
        Self::new(value.into_channel())
    }
}
694
/// Asynchronous client for the `Allocator` protocol.
#[derive(Debug, Clone)]
pub struct AllocatorProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
699
impl fidl::endpoints::Proxy for AllocatorProxy {
    type Protocol = AllocatorMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    // Returns Err(self) if the channel is still shared (proxy was cloned).
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
715
impl AllocatorProxy {
    // NOTE: the protocol methods below are thin wrappers that delegate to the
    // `AllocatorProxyInterface` implementation, which performs the actual
    // request encoding and channel I/O.

    /// Create a new Proxy for fuchsia.sysmem2/Allocator.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> AllocatorEventStream {
        AllocatorEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes.  This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    pub fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call: no reply is expected from the server.
        AllocatorProxyInterface::r#allocate_non_shared_collection(self, payload)
    }

    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2.BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    pub fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call: no reply is expected from the server.
        AllocatorProxyInterface::r#allocate_shared_collection(self, payload)
    }

    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`].  The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel.  The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    pub fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call: no reply is expected from the server.
        AllocatorProxyInterface::r#bind_shared_collection(self, payload)
    }

    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel.  This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    pub fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> fidl::client::QueryResponseFut<
        AllocatorValidateBufferCollectionTokenResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call: returns a future that resolves with the reply.
        AllocatorProxyInterface::r#validate_buffer_collection_token(self, payload)
    }

    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call: no reply is expected from the server.
        AllocatorProxyInterface::r#set_debug_client_info(self, payload)
    }

    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    pub fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
    ) -> fidl::client::QueryResponseFut<
        AllocatorGetVmoInfoResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call: returns a future that resolves with the reply.
        AllocatorProxyInterface::r#get_vmo_info(self, payload)
    }
}
965
impl AllocatorProxyInterface for AllocatorProxy {
    // One-way method: encodes the request and writes it to the channel
    // with the method's wire ordinal; no reply is awaited.
    fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
            &mut payload,
            0x5ca681f025a80e44,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way method; see above.
    fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateSharedCollectionRequest>(
            &mut payload,
            0x11a19ff51f0b49c1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way method; see above.
    fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorBindSharedCollectionRequest>(
            &mut payload,
            0x550916b0dc1d5b4e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type ValidateBufferCollectionTokenResponseFut = fidl::client::QueryResponseFut<
        AllocatorValidateBufferCollectionTokenResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way method: sends the request and returns a future whose decoder
    // (`_decode`) unwraps the flexible envelope of the eventual reply.
    fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> Self::ValidateBufferCollectionTokenResponseFut {
        // Decodes the raw reply buffer; the ordinal here must match the one
        // used when sending the request below.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x4c5ee91b02a7e68d,
            >(_buf?)?
            .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            AllocatorValidateBufferCollectionTokenRequest,
            AllocatorValidateBufferCollectionTokenResponse,
        >(
            payload,
            0x4c5ee91b02a7e68d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way method; see above.
    fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorSetDebugClientInfoRequest>(
            payload,
            0x6f68f19a3f509c4d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetVmoInfoResponseFut = fidl::client::QueryResponseFut<
        AllocatorGetVmoInfoResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way method: the reply is a flexible result (success table or
    // application-level `Error`), decoded by `_decode`.
    fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
    ) -> Self::GetVmoInfoResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x21a881120aa0ddf9,
            >(_buf?)?
            .into_result::<AllocatorMarker>("get_vmo_info")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResult>(
            &mut payload,
            0x21a881120aa0ddf9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
}
1068
/// Stream of events arriving on a fuchsia.sysmem2/Allocator client channel.
pub struct AllocatorEventStream {
    // Receives raw event message buffers from the underlying client.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
1072
// The stream holds no self-referential state, so it can be moved freely
// between polls.
impl std::marker::Unpin for AllocatorEventStream {}
1074
impl futures::stream::FusedStream for AllocatorEventStream {
    // The event stream is exhausted exactly when the underlying receiver is.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
1080
1081impl futures::Stream for AllocatorEventStream {
1082    type Item = Result<AllocatorEvent, fidl::Error>;
1083
1084    fn poll_next(
1085        mut self: std::pin::Pin<&mut Self>,
1086        cx: &mut std::task::Context<'_>,
1087    ) -> std::task::Poll<Option<Self::Item>> {
1088        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
1089            &mut self.event_receiver,
1090            cx
1091        )?) {
1092            Some(buf) => std::task::Poll::Ready(Some(AllocatorEvent::decode(buf))),
1093            None => std::task::Poll::Ready(None),
1094        }
1095    }
1096}
1097
/// Events delivered on a fuchsia.sysmem2/Allocator channel.
///
/// The protocol defines no named events; only unknown flexible events are
/// representable.
#[derive(Debug)]
pub enum AllocatorEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
1106
impl AllocatorEvent {
    /// Decodes a message buffer as a [`AllocatorEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<AllocatorEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events are unsolicited, so they never carry a transaction id.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // Flexible events with any ordinal are tolerated and surfaced as
            // `_UnknownEvent` rather than failing the stream.
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(AllocatorEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            // A strict message with an unrecognized ordinal is a protocol error.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
1126
/// A Stream of incoming requests for fuchsia.sysmem2/Allocator.
pub struct AllocatorRequestStream {
    // Shared server-side channel state; also cloned into control handles.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has yielded `None`; polling afterwards panics.
    is_terminated: bool,
}
1132
// The stream holds no self-referential state, so it can be moved freely
// between polls.
impl std::marker::Unpin for AllocatorRequestStream {}
1134
impl futures::stream::FusedStream for AllocatorRequestStream {
    // Tracks the flag set by `poll_next` when the channel shuts down or the
    // peer closes.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
1140
1141impl fidl::endpoints::RequestStream for AllocatorRequestStream {
1142    type Protocol = AllocatorMarker;
1143    type ControlHandle = AllocatorControlHandle;
1144
1145    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
1146        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
1147    }
1148
1149    fn control_handle(&self) -> Self::ControlHandle {
1150        AllocatorControlHandle { inner: self.inner.clone() }
1151    }
1152
1153    fn into_inner(
1154        self,
1155    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
1156    {
1157        (self.inner, self.is_terminated)
1158    }
1159
1160    fn from_inner(
1161        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
1162        is_terminated: bool,
1163    ) -> Self {
1164        Self { inner, is_terminated }
1165    }
1166}
1167
impl futures::Stream for AllocatorRequestStream {
    type Item = Result<AllocatorRequest, fidl::Error>;

    // Reads one message from the channel per poll, dispatches on its wire
    // ordinal, and decodes it into a typed `AllocatorRequest`.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled AllocatorRequestStream after completion");
        }
        // Borrows thread-local decode buffers for this read (avoids a fresh
        // allocation per message).
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        // Peer closure ends the stream cleanly rather than
                        // surfacing an error item.
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Allocator.AllocateNonSharedCollection (one-way)
                    0x5ca681f025a80e44 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorAllocateNonSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateNonSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::AllocateNonSharedCollection {
                            payload: req,
                            control_handle,
                        })
                    }
                    // Allocator.AllocateSharedCollection (one-way)
                    0x11a19ff51f0b49c1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorAllocateSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::AllocateSharedCollection {
                            payload: req,
                            control_handle,
                        })
                    }
                    // Allocator.BindSharedCollection (one-way)
                    0x550916b0dc1d5b4e => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorBindSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorBindSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::BindSharedCollection { payload: req, control_handle })
                    }
                    // Allocator.ValidateBufferCollectionToken (two-way; carries
                    // a responder holding the transaction id)
                    0x4c5ee91b02a7e68d => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorValidateBufferCollectionTokenRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorValidateBufferCollectionTokenRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::ValidateBufferCollectionToken {
                            payload: req,
                            responder: AllocatorValidateBufferCollectionTokenResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Allocator.SetDebugClientInfo (one-way)
                    0x6f68f19a3f509c4d => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorSetDebugClientInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::SetDebugClientInfo { payload: req, control_handle })
                    }
                    // Allocator.GetVmoInfo (two-way)
                    0x21a881120aa0ddf9 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorGetVmoInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorGetVmoInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::GetVmoInfo {
                            payload: req,
                            responder: AllocatorGetVmoInfoResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Unknown flexible one-way method: surface to the server
                    // as `_UnknownMethod` without replying.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(AllocatorRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with the
                    // framework-level UnknownMethod error, then surface it.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(AllocatorRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
1319
/// Allocates system memory buffers.
///
/// Epitaphs are not used in this protocol.
#[derive(Debug)]
pub enum AllocatorRequest {
    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes.  This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    AllocateNonSharedCollection {
        payload: AllocatorAllocateNonSharedCollectionRequest,
        control_handle: AllocatorControlHandle,
    },
    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2/BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    AllocateSharedCollection {
        payload: AllocatorAllocateSharedCollectionRequest,
        control_handle: AllocatorControlHandle,
    },
    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`].  The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel.  The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    BindSharedCollection {
        payload: AllocatorBindSharedCollectionRequest,
        control_handle: AllocatorControlHandle,
    },
    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel.  This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    ValidateBufferCollectionToken {
        payload: AllocatorValidateBufferCollectionTokenRequest,
        responder: AllocatorValidateBufferCollectionTokenResponder,
    },
    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    SetDebugClientInfo {
        payload: AllocatorSetDebugClientInfoRequest,
        control_handle: AllocatorControlHandle,
    },
    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    GetVmoInfo { payload: AllocatorGetVmoInfoRequest, responder: AllocatorGetVmoInfoResponder },
    /// An interaction was received which does not match any known method.
    #[non_exhaustive]
    _UnknownMethod {
        /// Ordinal of the method that was called.
        ordinal: u64,
        control_handle: AllocatorControlHandle,
        method_type: fidl::MethodType,
    },
}
1540
1541impl AllocatorRequest {
1542    #[allow(irrefutable_let_patterns)]
1543    pub fn into_allocate_non_shared_collection(
1544        self,
1545    ) -> Option<(AllocatorAllocateNonSharedCollectionRequest, AllocatorControlHandle)> {
1546        if let AllocatorRequest::AllocateNonSharedCollection { payload, control_handle } = self {
1547            Some((payload, control_handle))
1548        } else {
1549            None
1550        }
1551    }
1552
1553    #[allow(irrefutable_let_patterns)]
1554    pub fn into_allocate_shared_collection(
1555        self,
1556    ) -> Option<(AllocatorAllocateSharedCollectionRequest, AllocatorControlHandle)> {
1557        if let AllocatorRequest::AllocateSharedCollection { payload, control_handle } = self {
1558            Some((payload, control_handle))
1559        } else {
1560            None
1561        }
1562    }
1563
1564    #[allow(irrefutable_let_patterns)]
1565    pub fn into_bind_shared_collection(
1566        self,
1567    ) -> Option<(AllocatorBindSharedCollectionRequest, AllocatorControlHandle)> {
1568        if let AllocatorRequest::BindSharedCollection { payload, control_handle } = self {
1569            Some((payload, control_handle))
1570        } else {
1571            None
1572        }
1573    }
1574
1575    #[allow(irrefutable_let_patterns)]
1576    pub fn into_validate_buffer_collection_token(
1577        self,
1578    ) -> Option<(
1579        AllocatorValidateBufferCollectionTokenRequest,
1580        AllocatorValidateBufferCollectionTokenResponder,
1581    )> {
1582        if let AllocatorRequest::ValidateBufferCollectionToken { payload, responder } = self {
1583            Some((payload, responder))
1584        } else {
1585            None
1586        }
1587    }
1588
1589    #[allow(irrefutable_let_patterns)]
1590    pub fn into_set_debug_client_info(
1591        self,
1592    ) -> Option<(AllocatorSetDebugClientInfoRequest, AllocatorControlHandle)> {
1593        if let AllocatorRequest::SetDebugClientInfo { payload, control_handle } = self {
1594            Some((payload, control_handle))
1595        } else {
1596            None
1597        }
1598    }
1599
1600    #[allow(irrefutable_let_patterns)]
1601    pub fn into_get_vmo_info(
1602        self,
1603    ) -> Option<(AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResponder)> {
1604        if let AllocatorRequest::GetVmoInfo { payload, responder } = self {
1605            Some((payload, responder))
1606        } else {
1607            None
1608        }
1609    }
1610
1611    /// Name of the method defined in FIDL
1612    pub fn method_name(&self) -> &'static str {
1613        match *self {
1614            AllocatorRequest::AllocateNonSharedCollection { .. } => {
1615                "allocate_non_shared_collection"
1616            }
1617            AllocatorRequest::AllocateSharedCollection { .. } => "allocate_shared_collection",
1618            AllocatorRequest::BindSharedCollection { .. } => "bind_shared_collection",
1619            AllocatorRequest::ValidateBufferCollectionToken { .. } => {
1620                "validate_buffer_collection_token"
1621            }
1622            AllocatorRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
1623            AllocatorRequest::GetVmoInfo { .. } => "get_vmo_info",
1624            AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
1625                "unknown one-way method"
1626            }
1627            AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
1628                "unknown two-way method"
1629            }
1630        }
1631    }
1632}
1633
/// Cloneable handle to the serving channel for the `Allocator` protocol;
/// shares the underlying `ServeInner` with the request stream via `Arc`.
#[derive(Debug, Clone)]
pub struct AllocatorControlHandle {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
1638
impl fidl::endpoints::ControlHandle for AllocatorControlHandle {
    /// Shuts down the serving channel; delegates to the shared `ServeInner`.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    /// Shuts down the serving channel after sending `status` as an epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    /// Reports whether the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    /// Returns a future-like signal waiter that resolves when the underlying
    /// channel is closed.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    /// Raises/clears signals on the peer end of the channel. Only available
    /// when compiling for Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
1664
/// Inherent methods (event senders) for [`AllocatorControlHandle`].
/// Intentionally empty as generated — NOTE(review): presumably the `Allocator`
/// protocol declares no events; confirm against the FIDL definition.
impl AllocatorControlHandle {}
1666
/// Responder used to reply to a two-way `ValidateBufferCollectionToken` call;
/// records the transaction id to answer. Dropping it without responding shuts
/// down the channel (see the `Drop` impl).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct AllocatorValidateBufferCollectionTokenResponder {
    // ManuallyDrop lets send()/drop_without_shutdown() release the handle
    // exactly once without also running this responder's Drop shutdown path.
    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
    tx_id: u32,
}
1673
/// Set the channel to be shutdown (see [`AllocatorControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for AllocatorValidateBufferCollectionTokenResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
1684
impl fidl::endpoints::Responder for AllocatorValidateBufferCollectionTokenResponder {
    type ControlHandle = AllocatorControlHandle;

    /// Borrows the control handle shared with the request stream.
    fn control_handle(&self) -> &AllocatorControlHandle {
        &self.control_handle
    }

    /// Discards the responder without triggering the `Drop` impl's channel
    /// shutdown: the handle is dropped manually, then `mem::forget` prevents
    /// `Drop` from running.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
1699
1700impl AllocatorValidateBufferCollectionTokenResponder {
1701    /// Sends a response to the FIDL transaction.
1702    ///
1703    /// Sets the channel to shutdown if an error occurs.
1704    pub fn send(
1705        self,
1706        mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1707    ) -> Result<(), fidl::Error> {
1708        let _result = self.send_raw(payload);
1709        if _result.is_err() {
1710            self.control_handle.shutdown();
1711        }
1712        self.drop_without_shutdown();
1713        _result
1714    }
1715
1716    /// Similar to "send" but does not shutdown the channel if an error occurs.
1717    pub fn send_no_shutdown_on_err(
1718        self,
1719        mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1720    ) -> Result<(), fidl::Error> {
1721        let _result = self.send_raw(payload);
1722        self.drop_without_shutdown();
1723        _result
1724    }
1725
1726    fn send_raw(
1727        &self,
1728        mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1729    ) -> Result<(), fidl::Error> {
1730        self.control_handle.inner.send::<fidl::encoding::FlexibleType<
1731            AllocatorValidateBufferCollectionTokenResponse,
1732        >>(
1733            fidl::encoding::Flexible::new(payload),
1734            self.tx_id,
1735            0x4c5ee91b02a7e68d,
1736            fidl::encoding::DynamicFlags::FLEXIBLE,
1737        )
1738    }
1739}
1740
/// Responder used to reply to a two-way `GetVmoInfo` call; records the
/// transaction id to answer. Dropping it without responding shuts down the
/// channel (see the `Drop` impl).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct AllocatorGetVmoInfoResponder {
    // ManuallyDrop lets send()/drop_without_shutdown() release the handle
    // exactly once without also running this responder's Drop shutdown path.
    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
    tx_id: u32,
}
1747
/// Set the channel to be shutdown (see [`AllocatorControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for AllocatorGetVmoInfoResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
1758
impl fidl::endpoints::Responder for AllocatorGetVmoInfoResponder {
    type ControlHandle = AllocatorControlHandle;

    /// Borrows the control handle shared with the request stream.
    fn control_handle(&self) -> &AllocatorControlHandle {
        &self.control_handle
    }

    /// Discards the responder without triggering the `Drop` impl's channel
    /// shutdown: the handle is dropped manually, then `mem::forget` prevents
    /// `Drop` from running.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
1773
1774impl AllocatorGetVmoInfoResponder {
1775    /// Sends a response to the FIDL transaction.
1776    ///
1777    /// Sets the channel to shutdown if an error occurs.
1778    pub fn send(
1779        self,
1780        mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1781    ) -> Result<(), fidl::Error> {
1782        let _result = self.send_raw(result);
1783        if _result.is_err() {
1784            self.control_handle.shutdown();
1785        }
1786        self.drop_without_shutdown();
1787        _result
1788    }
1789
1790    /// Similar to "send" but does not shutdown the channel if an error occurs.
1791    pub fn send_no_shutdown_on_err(
1792        self,
1793        mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1794    ) -> Result<(), fidl::Error> {
1795        let _result = self.send_raw(result);
1796        self.drop_without_shutdown();
1797        _result
1798    }
1799
1800    fn send_raw(
1801        &self,
1802        mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1803    ) -> Result<(), fidl::Error> {
1804        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
1805            AllocatorGetVmoInfoResponse,
1806            Error,
1807        >>(
1808            fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
1809            self.tx_id,
1810            0x21a881120aa0ddf9,
1811            fidl::encoding::DynamicFlags::FLEXIBLE,
1812        )
1813    }
1814}
1815
/// Zero-sized marker type identifying the `BufferCollection` protocol for
/// type-level endpoint/proxy plumbing.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionMarker;
1818
impl fidl::endpoints::ProtocolMarker for BufferCollectionMarker {
    type Proxy = BufferCollectionProxy;
    type RequestStream = BufferCollectionRequestStream;
    // Synchronous clients exist only on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionSynchronousProxy;

    // Name used in error messages and debugging output for this protocol.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollection";
}
/// Result of `BufferCollection.WaitForAllBuffersAllocated`: the allocated
/// buffer info on success, a sysmem [`Error`] otherwise.
pub type BufferCollectionWaitForAllBuffersAllocatedResult =
    Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>;
/// Result of `BufferCollection.CheckAllBuffersAllocated`: `Ok(())` or a
/// sysmem [`Error`].
pub type BufferCollectionCheckAllBuffersAllocatedResult = Result<(), Error>;
1830
/// Client-side interface for the `BufferCollection` protocol, implemented by
/// both the async proxy and (on Fuchsia) the synchronous proxy. One-way
/// methods return `Result<(), fidl::Error>` directly; two-way methods return
/// an associated `Future` type that resolves to the decoded response.
pub trait BufferCollectionProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_constraints(
        &self,
        payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error>;
    type WaitForAllBuffersAllocatedResponseFut: std::future::Future<
            Output = Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error>,
        > + Send;
    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut;
    type CheckAllBuffersAllocatedResponseFut: std::future::Future<
            Output = Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error>,
        > + Send;
    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut;
    fn r#attach_token(
        &self,
        payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error>;
    fn r#attach_lifetime_tracking(
        &self,
        payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client for the `BufferCollection` protocol.
/// Only available when compiling for Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionSynchronousProxy {
    client: fidl::client::sync::Client,
}
1889
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionSynchronousProxy {
    type Proxy = BufferCollectionProxy;
    type Protocol = BufferCollectionMarker;

    /// Wraps a raw channel; delegates to [`BufferCollectionSynchronousProxy::new`].
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    /// Consumes the proxy, returning the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
1907
1908#[cfg(target_os = "fuchsia")]
1909impl BufferCollectionSynchronousProxy {
1910    pub fn new(channel: fidl::Channel) -> Self {
1911        let protocol_name = <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
1912        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
1913    }
1914
    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }
1918
    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    ///
    /// Wait and decode failures both surface as `fidl::Error`.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<BufferCollectionEvent, fidl::Error> {
        BufferCollectionEvent::decode(self.client.wait_for_event(deadline)?)
    }
1927
    /// Ensure that previous messages have been received server side. This is
    /// particularly useful after previous messages that created new tokens,
    /// because a token must be known to the sysmem server before sending the
    /// token to another participant.
    ///
    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
    /// to mitigate the possibility of a hostile/fake
    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
    /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
    /// the token as part of exchanging it for a
    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
    /// of stalling.
    ///
    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
    /// and then starting and completing a `Sync`, it's then safe to send the
    /// `BufferCollectionToken` client ends to other participants knowing the
    /// server will recognize the tokens when they're sent by the other
    /// participants to sysmem in a
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
    /// efficient way to create tokens while avoiding unnecessary round trips.
    ///
    /// Other options include waiting for each
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
    /// individually (using separate call to `Sync` after each), or calling
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
    /// converted to a `BufferCollection` via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
    /// the sync step and can create multiple tokens at once.
    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
        // Empty-payload two-way call; blocks until the reply or the deadline.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
        >(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionMarker>("sync")?;
        Ok(_response)
    }
1974
    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
    ///
    /// Normally a participant will convert a `BufferCollectionToken` into a
    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
    /// `Release` via the token (and then close the channel immediately or
    /// shortly later in response to server closing the server end), which
    /// avoids causing buffer collection failure. Without a prior `Release`,
    /// closing the `BufferCollectionToken` client end will cause buffer
    /// collection failure.
    ///
    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
    ///
    /// By default the server handles unexpected closure of a
    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
    /// first) by failing the buffer collection. Partly this is to expedite
    /// closing VMO handles to reclaim memory when any participant fails. If a
    /// participant would like to cleanly close a `BufferCollection` without
    /// causing buffer collection failure, the participant can send `Release`
    /// before closing the `BufferCollection` client end. The `Release` can
    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
    /// buffer collection won't require constraints from this node in order to
    /// allocate. If after `SetConstraints`, the constraints are retained and
    /// aggregated, despite the lack of `BufferCollection` connection at the
    /// time of constraints aggregation.
    ///
    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
    ///
    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
    /// end (without `Release` first) will trigger failure of the buffer
    /// collection. To close a `BufferCollectionTokenGroup` channel without
    /// failing the buffer collection, ensure that AllChildrenPresent() has been
    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
    /// client end.
    ///
    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
    /// buffer collection will fail (triggered by reception of `Release` without
    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
    /// close requires `AllChildrenPresent` (if not already sent), then
    /// `Release`, then close client end.
    ///
    /// If `Release` occurs after `AllChildrenPresent`, the children and all
    /// their constraints remain intact (just as they would if the
    /// `BufferCollectionTokenGroup` channel had remained open), and the client
    /// end close doesn't trigger buffer collection failure.
    ///
    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
    ///
    /// For brevity, the per-channel-protocol paragraphs above ignore the
    /// separate failure domain created by
    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
    /// unexpectedly closes (without `Release` first) and that client end is
    /// under a failure domain, instead of failing the whole buffer collection,
    /// the failure domain is failed, but the buffer collection itself is
    /// isolated from failure of the failure domain. Such failure domains can be
    /// nested, in which case only the inner-most failure domain in which the
    /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way (no response) message; the hex literal is the generated
        // method ordinal for this call.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2043
    /// Set a name for VMOs in this buffer collection.
    ///
    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
    /// will be truncated to fit. The name of the vmo will be suffixed with the
    /// buffer index within the collection (if the suffix fits within
    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
    /// listed in the inspect data.
    ///
    /// The name only affects VMOs allocated after the name is set; this call
    /// does not rename existing VMOs. If multiple clients set different names
    /// then the larger priority value will win. Setting a new name with the
    /// same priority as a prior name doesn't change the name.
    ///
    /// All table fields are currently required.
    ///
    /// + request `priority` The name is only set if this is the first `SetName`
    ///   or if `priority` is greater than any previous `priority` value in
    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
    /// + request `name` The name for VMOs created under this buffer collection.
    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        // One-way (no response) message; the hex literal is the generated
        // method ordinal for this call.
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2070
    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
    /// indicate which client is closing their channel first, leading to subtree
    /// failure (which can be normal if the purpose of the subtree is over, but
    /// if happening earlier than expected, the client-channel-specific name can
    /// help diagnose where the failure is first coming from, from sysmem's
    /// point of view).
    ///
    /// All table fields are currently required.
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way (no response) message; the hex literal is the generated
        // method ordinal for this call.
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2110
    /// Sysmem logs a warning if sysmem hasn't seen
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
    /// within 5 seconds after creation of a new collection.
    ///
    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
    /// take effect.
    ///
    /// In most cases the default works well.
    ///
    /// All table fields are currently required.
    ///
    /// + request `deadline` The time at which sysmem will start trying to log
    ///   the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way (no response) message; the hex literal is the generated
        // method ordinal for this call.
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2135
    /// This enables verbose logging for the buffer collection.
    ///
    /// Verbose logging includes constraints set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
    /// the tree of `Node`(s).
    ///
    /// Normally sysmem prints only a single line complaint when aggregation
    /// fails, with just the specific detailed reason that aggregation failed,
    /// with little surrounding context.  While this is often enough to diagnose
    /// a problem if only a small change was made and everything was working
    /// before the small change, it's often not particularly helpful for getting
    /// a new buffer collection to work for the first time.  Especially with
    /// more complex trees of nodes, involving things like
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
    /// looks like and why it's failing a logical allocation, or why a tree or
    /// subtree is failing sooner than expected.
    ///
    /// The intent of the extra logging is to be acceptable from a performance
    /// point of view, under the assumption that verbose logging is only enabled
    /// on a low number of buffer collections. If we're not tracking down a bug,
    /// we shouldn't send this message.
    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        // One-way (no response) message; the hex literal is the generated
        // method ordinal for this call.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2169
    /// This gets a handle that can be used as a parameter to
    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
    /// client obtained this handle from this `Node`.
    ///
    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
    /// despite the two calls typically being on different channels.
    ///
    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// All table fields are currently required.
    ///
    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
    ///   different `Node` channel, to prove that the client obtained the handle
    ///   from this `Node`.
    pub fn r#get_node_ref(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
        // Two-way call: empty request, table response. Waits (up to
        // `___deadline`) for the reply; the hex literal is the generated
        // method ordinal for this call.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
        >(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionMarker>("get_node_ref")?;
        Ok(_response)
    }
2202
    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
    /// rooted at a different child token of a common parent
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
    /// passed-in `node_ref`.
    ///
    /// This call is for assisting with admission control de-duplication, and
    /// with debugging.
    ///
    /// The `node_ref` must be obtained using
    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
    ///
    /// The `node_ref` can be a duplicated handle; it's not necessary to call
    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// If a calling token may not actually be a valid token at all due to a
    /// potentially hostile/untrusted provider of the token, call
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
    /// never responds due to a calling token not being a real token (not really
    /// talking to sysmem).  Another option is to call
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
    /// which also validates the token along with converting it to a
    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
    ///
    /// All table fields are currently required.
    ///
    /// - response `is_alternate`
    ///   - true: The first parent node in common between the calling node and
    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
    ///     that the calling `Node` and the `node_ref` `Node` will not have both
    ///     their constraints apply - rather sysmem will choose one or the other
    ///     of the constraints - never both.  This is because only one child of
    ///     a `BufferCollectionTokenGroup` is selected during logical
    ///     allocation, with only that one child's subtree contributing to
    ///     constraints aggregation.
    ///   - false: The first parent node in common between the calling `Node`
    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
    ///     Currently, this means the first parent node in common is a
    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
    ///     `Node` may have both their constraints apply during constraints
    ///     aggregation of the logical allocation, if both `Node`(s) are
    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
    ///     this case, there is no `BufferCollectionTokenGroup` that will
    ///     directly prevent the two `Node`(s) from both being selected and
    ///     their constraints both aggregated, but even when false, one or both
    ///     `Node`(s) may still be eliminated from consideration if one or both
    ///     `Node`(s) has a direct or indirect parent
    ///     `BufferCollectionTokenGroup` which selects a child subtree other
    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
    ///   associated with the same buffer collection as the calling `Node`.
    ///   Another reason for this error is if the `node_ref` is an
    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
    ///   a real `node_ref` obtained from `GetNodeRef`.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
    ///   the needed rights expected on a real `node_ref`.
    /// * No other failing status codes are returned by this call.  However,
    ///   sysmem may add additional codes in future, so the client should have
    ///   sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
        // Two-way call with a domain-error result type; waits (up to
        // `___deadline`) for the reply. The hex literal is the generated
        // method ordinal for this call.
        let _response = self.client.send_query::<
            NodeIsAlternateForRequest,
            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
        >(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionMarker>("is_alternate_for")?;
        // Generated identity map: forwards the Ok payload unchanged.
        Ok(_response.map(|x| x))
    }
2281
    /// Get the buffer collection ID. This ID is also available from
    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
    /// within the collection).
    ///
    /// This call is mainly useful in situations where we can't convey a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
    /// handle, which can be joined back up with a `BufferCollection` client end
    /// that was created via a different path. Prefer to convey a
    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
    ///
    /// Trusting a `buffer_collection_id` value from a source other than sysmem
    /// is analogous to trusting a koid value from a source other than zircon.
    /// Both should be avoided unless really necessary, and both require
    /// caution. In some situations it may be reasonable to refer to a
    /// pre-established `BufferCollection` by `buffer_collection_id` via a
    /// protocol for efficiency reasons, but an incoming value purporting to be
    /// a `buffer_collection_id` is not sufficient alone to justify granting the
    /// sender of the `buffer_collection_id` any capability. The sender must
    /// first prove to a receiver that the sender has/had a VMO or has/had a
    /// `BufferCollectionToken` to the same collection by sending a handle that
    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
    /// `buffer_collection_id` value. The receiver should take care to avoid
    /// assuming that a sender had a `BufferCollectionToken` in cases where the
    /// sender has only proven that the sender had a VMO.
    ///
    /// - response `buffer_collection_id` This ID is unique per buffer
    ///   collection per boot. Each buffer is uniquely identified by the
    ///   `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
        // Two-way call: empty request, table response. Waits (up to
        // `___deadline`) for the reply; the hex literal is the generated
        // method ordinal for this call.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
        Ok(_response)
    }
2327
    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
    /// created after this message to weak, which means that a client's `Node`
    /// client end (or a child created after this message) is not alone
    /// sufficient to keep allocated VMOs alive.
    ///
    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
    /// `close_weak_asap`.
    ///
    /// This message is only permitted before the `Node` becomes ready for
    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
    ///   * `BufferCollectionToken`: any time
    ///   * `BufferCollection`: before `SetConstraints`
    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
    ///
    /// Currently, no conversion from strong `Node` to weak `Node` after ready
    /// for allocation is provided, but a client can simulate that by creating
    /// an additional `Node` before allocation and setting that additional
    /// `Node` to weak, and then potentially at some point later sending
    /// `Release` and closing the client end of the client's strong `Node`, but
    /// keeping the client's weak `Node`.
    ///
    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
    /// collection failure (all `Node` client end(s) will see
    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
    /// this situation until all `Node`(s) are ready for allocation. For initial
    /// allocation to succeed, at least one strong `Node` is required to exist
    /// at allocation time, but after that client receives VMO handles, that
    /// client can `BufferCollection.Release` and close the client end without
    /// causing this type of failure.
    ///
    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
    /// separately as appropriate.
    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
        // One-way (no response) message; the hex literal is the generated
        // method ordinal for this call.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2369
    /// This indicates to sysmem that the client is prepared to pay attention to
    /// `close_weak_asap`.
    ///
    /// If sent, this message must be before
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
    /// send this message before `WaitForAllBuffersAllocated`, or a parent
    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
    /// trigger buffer collection failure.
    ///
    /// This message is necessary because weak sysmem VMOs have not always been
    /// a thing, so older clients are not aware of the need to pay attention to
    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
    /// sysmem weak VMO handles asap. By having this message and requiring
    /// participants to indicate their acceptance of this aspect of the overall
    /// protocol, we avoid situations where an older client is delivered a weak
    /// VMO without any way for sysmem to get that VMO to close quickly later
    /// (and on a per-buffer basis).
    ///
    /// A participant that doesn't handle `close_weak_asap` and also doesn't
    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
    /// same participant has a child/delegate which does retrieve VMOs, that
    /// child/delegate will need to send `SetWeakOk` before
    /// `WaitForAllBuffersAllocated`.
    ///
    /// + request `for_child_nodes_also` If present and true, this means direct
    ///   child nodes of this node created after this message plus all
    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
    ///   those nodes. Any child node of this node that was created before this
    ///   message is not included. This setting is "sticky" in the sense that a
    ///   subsequent `SetWeakOk` without this bool set to true does not reset
    ///   the server-side bool. If this creates a problem for a participant, a
    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
    ///   tokens instead, as appropriate. A participant should only set
    ///   `for_child_nodes_also` true if the participant can really promise to
    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
    ///   weak VMO handles held by participants holding the corresponding child
    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
    ///   which are using sysmem(1) can be weak, despite the clients of those
    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
    ///   direct way to find out about `close_weak_asap`. This only applies to
    ///   descendents of this `Node` which are using sysmem(1), not to this
    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
    ///   token, which will fail allocation unless an ancestor of this `Node`
    ///   specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // One-way (no response) message; the hex literal is the generated
        // method ordinal for this call.
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2426
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
    /// reservation by a different `Node` via
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
    ///
    /// The `Node` buffer counts may not be released until the entire tree of
    /// `Node`(s) is closed or failed, because
    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
    /// `Node` buffer counts remain reserved until the orphaned node is later
    /// cleaned up.
    ///
    /// If the `Node` exceeds a fairly large number of attached eventpair server
    /// ends, a log message will indicate this and the `Node` (and the
    /// appropriate) sub-tree will fail.
    ///
    /// The `server_end` will remain open when
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// This message can also be used with a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way (no response) message; the hex literal is the generated
        // method ordinal for this call.
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2460
    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
    /// collection.
    ///
    /// A participant may only call
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// For buffer allocation to be attempted, all holders of a
    /// `BufferCollection` client end need to call `SetConstraints` before
    /// sysmem will attempt to allocate buffers.
    ///
    /// + request `constraints` These are the constraints on the buffer
    ///   collection imposed by the sending client/participant.  The
    ///   `constraints` field is not required to be set. If not set, the client
    ///   is not setting any actual constraints, but is indicating that the
    ///   client has no constraints to set. A client that doesn't set the
    ///   `constraints` field won't receive any VMO handles, but can still find
    ///   out how many buffers were allocated and can still refer to buffers by
    ///   their `buffer_index`.
    pub fn r#set_constraints(
        &self,
        mut payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error> {
        // One-way (no response) message; the hex literal is the generated
        // method ordinal for this call.
        self.client.send::<BufferCollectionSetConstraintsRequest>(
            &mut payload,
            0x1fde0f19d650197b,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2490
    /// Wait until all buffers are allocated.
    ///
    /// This FIDL call completes when buffers have been allocated, or completes
    /// with some failure detail if allocation has been attempted but failed.
    ///
    /// The following must occur before buffers will be allocated:
    ///   * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
    ///     collection must be turned in via `BindSharedCollection` to get a
    ///     [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
    ///     [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
    ///     or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
    ///     to them.
    ///   * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
    ///     must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
    ///     sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
    ///     sent to them.
    ///
    /// - result `buffer_collection_info` The VMO handles and other related
    ///   info.
    /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
    ///   cannot be fulfilled due to resource exhaustion.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The request is
    ///   malformed.
    /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY]` The
    ///   request is valid but cannot be satisfied, perhaps due to hardware
    ///   limitations. This can happen if participants have incompatible
    ///   constraints (empty intersection, roughly speaking). See the log for
    ///   more info. In cases where a participant could potentially be treated
    ///   as optional, see [`BufferCollectionTokenGroup`]. When using
    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
    ///   error code if there aren't enough buffers in the pre-existing
    ///   collection to satisfy the constraints set on the attached token and
    ///   any sub-tree of tokens derived from the attached token.
    pub fn r#wait_for_all_buffers_allocated(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
        // Two-way call with a domain-error result type; waits (up to
        // `___deadline`) for allocation to complete or fail. The hex literal
        // is the generated method ordinal for this call.
        let _response = self
            .client
            .send_query::<fidl::encoding::EmptyPayload, fidl::encoding::FlexibleResultType<
                BufferCollectionWaitForAllBuffersAllocatedResponse,
                Error,
            >>(
                (), 0x62300344b61404e, fidl::encoding::DynamicFlags::FLEXIBLE, ___deadline
            )?
            .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
        // Generated identity map: forwards the Ok payload unchanged.
        Ok(_response.map(|x| x))
    }
2539
    /// Checks whether all the buffers have been allocated, in a polling
    /// fashion.
    ///
    /// * If the buffer collection has been allocated, returns success.
    /// * If the buffer collection failed allocation, returns the same
    ///   [`fuchsia.sysmem2/Error`] as
    ///   [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
    ///   return.
    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
    ///   would not respond quickly.
    pub fn r#check_all_buffers_allocated(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
        // Two-way call: empty request, empty-struct success or domain error.
        // Waits (up to `___deadline`) for the reply; the hex literal is the
        // generated method ordinal for this call.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            (),
            0x35a5fe77ce939c10,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
        // Generated identity map: forwards the Ok payload unchanged.
        Ok(_response.map(|x| x))
    }
2567
2568    /// Create a new token to add a new participant to an existing logical
2569    /// buffer collection, if the existing collection's buffer counts,
2570    /// constraints, and participants allow.
2571    ///
2572    /// This can be useful in replacing a failed participant, and/or in
2573    /// adding/re-adding a participant after buffers have already been
2574    /// allocated.
2575    ///
2576    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
2577    /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
2578    /// goes through the normal procedure of setting constraints or closing
2579    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
2580    /// clients' point of view, despite the possibility that all the buffers
2581    /// were actually allocated previously. This process is called "logical
2582    /// allocation". Most instances of "allocation" in docs for other messages
2583    /// can also be read as "allocation or logical allocation" while remaining
2584    /// valid, but we just say "allocation" in most places for brevity/clarity
2585    /// of explanation, with the details of "logical allocation" left for the
2586    /// docs here on `AttachToken`.
2587    ///
2588    /// Failure of an attached `Node` does not propagate to the parent of the
2589    /// attached `Node`. More generally, failure of a child `Node` is blocked
2590    /// from reaching its parent `Node` if the child is attached, or if the
2591    /// child is dispensable and the failure occurred after logical allocation
2592    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
2593    ///
2594    /// A participant may in some scenarios choose to initially use a
2595    /// dispensable token for a given instance of a delegate participant, and
2596    /// then later if the first instance of that delegate participant fails, a
    /// new second instance of that delegate participant may be given a token
2598    /// created with `AttachToken`.
2599    ///
2600    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
2601    /// client end, the token acts like any other token. The client can
2602    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
2603    /// and can send the token to a different process/participant. The
2604    /// `BufferCollectionToken` `Node` should be converted to a
2605    /// `BufferCollection` `Node` as normal by sending
2606    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
2607    /// without causing subtree failure by sending
2608    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
2609    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
2610    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
2611    /// the `BufferCollection`.
2612    ///
2613    /// Within the subtree, a success result from
2614    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
2615    /// the subtree participants' constraints were satisfiable using the
2616    /// already-existing buffer collection, the already-established
2617    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
2618    /// constraints, and the already-existing other participants (already added
2619    /// via successful logical allocation) and their specified buffer counts in
2620    /// their constraints. A failure result means the new participants'
2621    /// constraints cannot be satisfied using the existing buffer collection and
2622    /// its already-added participants. Creating a new collection instead may
2623    /// allow all participants' constraints to be satisfied, assuming
2624    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
2625    /// used.
2626    ///
2627    /// A token created with `AttachToken` performs constraints aggregation with
2628    /// all constraints currently in effect on the buffer collection, plus the
2629    /// attached token under consideration plus child tokens under the attached
2630    /// token which are not themselves an attached token or under such a token.
2631    /// Further subtrees under this subtree are considered for logical
2632    /// allocation only after this subtree has completed logical allocation.
2633    ///
2634    /// Assignment of existing buffers to participants'
2635    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
2636    /// etc is first-come first-served, but a child can't logically allocate
2637    /// before all its parents have sent `SetConstraints`.
2638    ///
2639    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
2640    /// in contrast to `AttachToken`, has the created token `Node` + child
2641    /// `Node`(s) (in the created subtree but not in any subtree under this
2642    /// subtree) participate in constraints aggregation along with its parent
2643    /// during the parent's allocation or logical allocation.
2644    ///
2645    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
2646    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
2647    /// sysmem before the new token can be passed to `BindSharedCollection`. The
2648    /// `Sync` of the new token can be accomplished with
2649    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
2650    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
2651    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
2652    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
2653    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
2654    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
2655    /// created token, to also sync those additional tokens to sysmem using a
2656    /// single round-trip.
2657    ///
2658    /// All table fields are currently required.
2659    ///
    /// + request `rights_attenuation_mask` This allows attenuating the VMO
2661    ///   rights of the subtree. These values for `rights_attenuation_mask`
2662    ///   result in no attenuation (note that 0 is not on this list):
2663    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
2664    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
2665    /// + request `token_request` The server end of the `BufferCollectionToken`
2666    ///   channel. The client retains the client end.
2667    pub fn r#attach_token(
2668        &self,
2669        mut payload: BufferCollectionAttachTokenRequest,
2670    ) -> Result<(), fidl::Error> {
2671        self.client.send::<BufferCollectionAttachTokenRequest>(
2672            &mut payload,
2673            0x46ac7d0008492982,
2674            fidl::encoding::DynamicFlags::FLEXIBLE,
2675        )
2676    }
2677
2678    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
2679    /// buffers have been allocated and only the specified number of buffers (or
2680    /// fewer) remain in the buffer collection.
2681    ///
2682    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
2683    /// client to wait until an old buffer collection is fully or mostly
2684    /// deallocated before attempting allocation of a new buffer collection. The
2685    /// eventpair is only signalled when the buffers of this collection have
2686    /// been fully deallocated (not just un-referenced by clients, but all the
2687    /// memory consumed by those buffers has been fully reclaimed/recycled), or
2688    /// when allocation or logical allocation fails for the tree or subtree
2689    /// including this [`fuchsia.sysmem2/BufferCollection`].
2690    ///
2691    /// The eventpair won't be signalled until allocation or logical allocation
2692    /// has completed; until then, the collection's current buffer count is
2693    /// ignored.
2694    ///
2695    /// If logical allocation fails for an attached subtree (using
2696    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
2697    /// eventpair will close during that failure regardless of the number of
    /// buffers potentially allocated in the overall buffer collection. This is
2699    /// for logical allocation consistency with normal allocation.
2700    ///
2701    /// The lifetime signalled by this event includes asynchronous cleanup of
2702    /// allocated buffers, and this asynchronous cleanup cannot occur until all
2703    /// holders of VMO handles to the buffers have closed those VMO handles.
2704    /// Therefore, clients should take care not to become blocked forever
2705    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
2706    /// participants using the logical buffer collection (including the waiter
2707    /// itself) are less trusted, less reliable, or potentially blocked by the
2708    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
2709    /// for the client wait may be prudent, depending on details of how the
2710    /// collection and/or its VMOs are used or shared. Failure to allocate a
2711    /// new/replacement buffer collection is better than getting stuck forever.
2712    ///
2713    /// The sysmem server itself intentionally does not perform any waiting on
2714    /// already-failed collections' VMOs to finish cleaning up before attempting
2715    /// a new allocation, and the sysmem server intentionally doesn't retry
2716    /// allocation if a new allocation fails due to out of memory, even if that
2717    /// failure is potentially due to continued existence of an old collection's
2718    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
2719    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
2720    /// as long as the waiting client is careful to not create a deadlock.
2721    ///
2722    /// Continued existence of old collections that are still cleaning up is not
2723    /// the only reason that a new allocation may fail due to insufficient
2724    /// memory, even if the new allocation is allocating physically contiguous
2725    /// buffers. Overall system memory pressure can also be the cause of failure
2726    /// to allocate a new collection. See also
2727    /// [`fuchsia.memorypressure/Provider`].
2728    ///
2729    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
2730    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
2731    /// `eventpair` handle (server end) can be sent via more than one
2732    /// `AttachLifetimeTracking` message to different protocols, and the
2733    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
2734    /// the conditions are met (all holders of duplicates have closed their
    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
2736    /// client end can (also) be duplicated without preventing the
2737    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
2738    ///
2739    /// The server intentionally doesn't "trust" any signals set on the
2740    /// `server_end`. This mechanism intentionally uses only
2741    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
2742    /// "early", and is only set when all handles to the server end eventpair
2743    /// are closed. No meaning is associated with any of the other signals, and
2744    /// clients should ignore any other signal bits on either end of the
2745    /// `eventpair`.
2746    ///
2747    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
2748    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
2749    /// transfer without causing `BufferCollection` channel failure).
2750    ///
2751    /// All table fields are currently required.
2752    ///
2753    /// + request `server_end` This eventpair handle will be closed by the
2754    ///   sysmem server when buffers have been allocated initially and the
2755    ///   number of buffers is then less than or equal to `buffers_remaining`.
2756    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
2757    ///   fewer) buffers to be fully deallocated. A number greater than zero can
2758    ///   be useful in situations where a known number of buffers are
2759    ///   intentionally not closed so that the data can continue to be used,
2760    ///   such as for keeping the last available video frame displayed in the UI
2761    ///   even if the video stream was using protected output buffers. It's
2762    ///   outside the scope of the `BufferCollection` interface (at least for
2763    ///   now) to determine how many buffers may be held without closing, but
2764    ///   it'll typically be in the range 0-2.
2765    pub fn r#attach_lifetime_tracking(
2766        &self,
2767        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
2768    ) -> Result<(), fidl::Error> {
2769        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
2770            &mut payload,
2771            0x3ecb510113116dcf,
2772            fidl::encoding::DynamicFlags::FLEXIBLE,
2773        )
2774    }
2775}
2776
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionSynchronousProxy> for zx::Handle {
    /// Consumes the proxy, yielding its underlying channel as a generic handle.
    fn from(value: BufferCollectionSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
2783
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionSynchronousProxy {
    /// Wraps a raw synchronous channel in a typed proxy.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionSynchronousProxy::new(value)
    }
}
2790
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionSynchronousProxy {
    type Protocol = BufferCollectionMarker;

    /// Builds a synchronous proxy from a typed client endpoint by unwrapping
    /// the endpoint's channel.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
2799
/// Asynchronous client proxy for the fuchsia.sysmem2/BufferCollection protocol.
#[derive(Debug, Clone)]
pub struct BufferCollectionProxy {
    // Async FIDL client bound to the BufferCollection channel.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
2804
2805impl fidl::endpoints::Proxy for BufferCollectionProxy {
2806    type Protocol = BufferCollectionMarker;
2807
2808    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
2809        Self::new(inner)
2810    }
2811
2812    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
2813        self.client.into_channel().map_err(|client| Self { client })
2814    }
2815
2816    fn as_channel(&self) -> &::fidl::AsyncChannel {
2817        self.client.as_channel()
2818    }
2819}
2820
2821impl BufferCollectionProxy {
2822    /// Create a new Proxy for fuchsia.sysmem2/BufferCollection.
2823    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
2824        let protocol_name = <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
2825        Self { client: fidl::client::Client::new(channel, protocol_name) }
2826    }
2827
2828    /// Get a Stream of events from the remote end of the protocol.
2829    ///
2830    /// # Panics
2831    ///
2832    /// Panics if the event stream was already taken.
2833    pub fn take_event_stream(&self) -> BufferCollectionEventStream {
2834        BufferCollectionEventStream { event_receiver: self.client.take_event_receiver() }
2835    }
2836
2837    /// Ensure that previous messages have been received server side. This is
2838    /// particularly useful after previous messages that created new tokens,
2839    /// because a token must be known to the sysmem server before sending the
2840    /// token to another participant.
2841    ///
2842    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
2843    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
2844    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
2845    /// to mitigate the possibility of a hostile/fake
2846    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
2847    /// Another way is to pass the token to
2848    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
2849    /// the token as part of exchanging it for a
2850    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
2851    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
2852    /// of stalling.
2853    ///
2854    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
2855    /// and then starting and completing a `Sync`, it's then safe to send the
2856    /// `BufferCollectionToken` client ends to other participants knowing the
2857    /// server will recognize the tokens when they're sent by the other
2858    /// participants to sysmem in a
2859    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
2860    /// efficient way to create tokens while avoiding unnecessary round trips.
2861    ///
2862    /// Other options include waiting for each
2863    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
2864    /// individually (using separate call to `Sync` after each), or calling
2865    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
2866    /// converted to a `BufferCollection` via
2867    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
2868    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
2869    /// the sync step and can create multiple tokens at once.
2870    pub fn r#sync(
2871        &self,
2872    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
2873        BufferCollectionProxyInterface::r#sync(self)
2874    }
2875
2876    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
2877    ///
2878    /// Normally a participant will convert a `BufferCollectionToken` into a
2879    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
2880    /// `Release` via the token (and then close the channel immediately or
2881    /// shortly later in response to server closing the server end), which
2882    /// avoids causing buffer collection failure. Without a prior `Release`,
2883    /// closing the `BufferCollectionToken` client end will cause buffer
2884    /// collection failure.
2885    ///
2886    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
2887    ///
2888    /// By default the server handles unexpected closure of a
2889    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
2890    /// first) by failing the buffer collection. Partly this is to expedite
2891    /// closing VMO handles to reclaim memory when any participant fails. If a
2892    /// participant would like to cleanly close a `BufferCollection` without
2893    /// causing buffer collection failure, the participant can send `Release`
2894    /// before closing the `BufferCollection` client end. The `Release` can
2895    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
2896    /// buffer collection won't require constraints from this node in order to
2897    /// allocate. If after `SetConstraints`, the constraints are retained and
2898    /// aggregated, despite the lack of `BufferCollection` connection at the
2899    /// time of constraints aggregation.
2900    ///
2901    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
2902    ///
2903    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
2904    /// end (without `Release` first) will trigger failure of the buffer
2905    /// collection. To close a `BufferCollectionTokenGroup` channel without
2906    /// failing the buffer collection, ensure that AllChildrenPresent() has been
2907    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
2908    /// client end.
2909    ///
2910    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
2912    /// buffer collection will fail (triggered by reception of `Release` without
2913    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
2914    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
2915    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
2916    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
2917    /// close requires `AllChildrenPresent` (if not already sent), then
2918    /// `Release`, then close client end.
2919    ///
2920    /// If `Release` occurs after `AllChildrenPresent`, the children and all
2921    /// their constraints remain intact (just as they would if the
2922    /// `BufferCollectionTokenGroup` channel had remained open), and the client
2923    /// end close doesn't trigger buffer collection failure.
2924    ///
2925    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
2926    ///
2927    /// For brevity, the per-channel-protocol paragraphs above ignore the
2928    /// separate failure domain created by
2929    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
2930    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
2931    /// unexpectedly closes (without `Release` first) and that client end is
2932    /// under a failure domain, instead of failing the whole buffer collection,
2933    /// the failure domain is failed, but the buffer collection itself is
2934    /// isolated from failure of the failure domain. Such failure domains can be
2935    /// nested, in which case only the inner-most failure domain in which the
2936    /// `Node` resides fails.
2937    pub fn r#release(&self) -> Result<(), fidl::Error> {
2938        BufferCollectionProxyInterface::r#release(self)
2939    }
2940
2941    /// Set a name for VMOs in this buffer collection.
2942    ///
2943    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
2944    /// will be truncated to fit. The name of the vmo will be suffixed with the
2945    /// buffer index within the collection (if the suffix fits within
2946    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
2947    /// listed in the inspect data.
2948    ///
2949    /// The name only affects VMOs allocated after the name is set; this call
2950    /// does not rename existing VMOs. If multiple clients set different names
2951    /// then the larger priority value will win. Setting a new name with the
2952    /// same priority as a prior name doesn't change the name.
2953    ///
2954    /// All table fields are currently required.
2955    ///
2956    /// + request `priority` The name is only set if this is the first `SetName`
2957    ///   or if `priority` is greater than any previous `priority` value in
2958    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
2959    /// + request `name` The name for VMOs created under this buffer collection.
2960    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
2961        BufferCollectionProxyInterface::r#set_name(self, payload)
2962    }
2963
2964    /// Set information about the current client that can be used by sysmem to
2965    /// help diagnose leaking memory and allocation stalls waiting for a
2966    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
2967    ///
2968    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
2969    /// `Node`(s) derived from this `Node`, unless overriden by
2970    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
2971    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
2972    ///
2973    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
2974    /// `Allocator` is the most efficient way to ensure that all
2975    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
2976    /// set, and is also more efficient than separately sending the same debug
2977    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
2978    /// created [`fuchsia.sysmem2/Node`].
2979    ///
2980    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
2981    /// indicate which client is closing their channel first, leading to subtree
2982    /// failure (which can be normal if the purpose of the subtree is over, but
2983    /// if happening earlier than expected, the client-channel-specific name can
2984    /// help diagnose where the failure is first coming from, from sysmem's
2985    /// point of view).
2986    ///
2987    /// All table fields are currently required.
2988    ///
2989    /// + request `name` This can be an arbitrary string, but the current
2990    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
2991    /// + request `id` This can be an arbitrary id, but the current process ID
2992    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
2993    pub fn r#set_debug_client_info(
2994        &self,
2995        mut payload: &NodeSetDebugClientInfoRequest,
2996    ) -> Result<(), fidl::Error> {
2997        BufferCollectionProxyInterface::r#set_debug_client_info(self, payload)
2998    }
2999
3000    /// Sysmem logs a warning if sysmem hasn't seen
3001    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
3002    /// within 5 seconds after creation of a new collection.
3003    ///
3004    /// Clients can call this method to change when the log is printed. If
3005    /// multiple client set the deadline, it's unspecified which deadline will
3006    /// take effect.
3007    ///
3008    /// In most cases the default works well.
3009    ///
3010    /// All table fields are currently required.
3011    ///
3012    /// + request `deadline` The time at which sysmem will start trying to log
3013    ///   the warning, unless all constraints are with sysmem by then.
3014    pub fn r#set_debug_timeout_log_deadline(
3015        &self,
3016        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
3017    ) -> Result<(), fidl::Error> {
3018        BufferCollectionProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
3019    }
3020
3021    /// This enables verbose logging for the buffer collection.
3022    ///
3023    /// Verbose logging includes constraints set via
3024    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
3025    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
3026    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
3027    /// the tree of `Node`(s).
3028    ///
3029    /// Normally sysmem prints only a single line complaint when aggregation
3030    /// fails, with just the specific detailed reason that aggregation failed,
3031    /// with little surrounding context.  While this is often enough to diagnose
3032    /// a problem if only a small change was made and everything was working
3033    /// before the small change, it's often not particularly helpful for getting
3034    /// a new buffer collection to work for the first time.  Especially with
3035    /// more complex trees of nodes, involving things like
3036    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
3037    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
3038    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
3039    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
3040    /// looks like and why it's failing a logical allocation, or why a tree or
3041    /// subtree is failing sooner than expected.
3042    ///
3043    /// The intent of the extra logging is to be acceptable from a performance
3044    /// point of view, under the assumption that verbose logging is only enabled
3045    /// on a low number of buffer collections. If we're not tracking down a bug,
3046    /// we shouldn't send this message.
3047    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
3048        BufferCollectionProxyInterface::r#set_verbose_logging(self)
3049    }
3050
3051    /// This gets a handle that can be used as a parameter to
3052    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
3053    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
3054    /// client obtained this handle from this `Node`.
3055    ///
3056    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
3057    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
3058    /// despite the two calls typically being on different channels.
3059    ///
3060    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
3061    ///
3062    /// All table fields are currently required.
3063    ///
3064    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
3065    ///   different `Node` channel, to prove that the client obtained the handle
3066    ///   from this `Node`.
3067    pub fn r#get_node_ref(
3068        &self,
3069    ) -> fidl::client::QueryResponseFut<
3070        NodeGetNodeRefResponse,
3071        fidl::encoding::DefaultFuchsiaResourceDialect,
3072    > {
3073        BufferCollectionProxyInterface::r#get_node_ref(self)
3074    }
3075
    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
    /// rooted at a different child token of a common parent
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
    /// passed-in `node_ref`.
    ///
    /// This call is for assisting with admission control de-duplication, and
    /// with debugging.
    ///
    /// The `node_ref` must be obtained using
    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
    ///
    /// The `node_ref` can be a duplicated handle; it's not necessary to call
    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// If a calling token may not actually be a valid token at all due to a
    /// potentially hostile/untrusted provider of the token, call
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
    /// never responds due to a calling token not being a real token (not really
    /// talking to sysmem).  Another option is to call
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
    /// which also validates the token along with converting it to a
    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
    ///
    /// All table fields are currently required.
    ///
    /// - response `is_alternate`
    ///   - true: The first parent node in common between the calling node and
    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
    ///     that the calling `Node` and the `node_ref` `Node` will not have both
    ///     their constraints apply - rather sysmem will choose one or the other
    ///     of the constraints - never both.  This is because only one child of
    ///     a `BufferCollectionTokenGroup` is selected during logical
    ///     allocation, with only that one child's subtree contributing to
    ///     constraints aggregation.
    ///   - false: The first parent node in common between the calling `Node`
    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
    ///     Currently, this means the first parent node in common is a
    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
    ///     `Node` may have both their constraints apply during constraints
    ///     aggregation of the logical allocation, if both `Node`(s) are
    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
    ///     this case, there is no `BufferCollectionTokenGroup` that will
    ///     directly prevent the two `Node`(s) from both being selected and
    ///     their constraints both aggregated, but even when false, one or both
    ///     `Node`(s) may still be eliminated from consideration if one or both
    ///     `Node`(s) has a direct or indirect parent
    ///     `BufferCollectionTokenGroup` which selects a child subtree other
    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
    ///   associated with the same buffer collection as the calling `Node`.
    ///   Another reason for this error is if the `node_ref` is an
    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
    ///   a real `node_ref` obtained from `GetNodeRef`.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
    ///   the needed rights expected on a real `node_ref`.
    /// * No other failing status codes are returned by this call.  However,
    ///   sysmem may add additional codes in future, so the client should have
    ///   sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Thin delegation to the `BufferCollectionProxyInterface` impl for
        // this proxy, which performs the actual FIDL encode/send/decode.
        BufferCollectionProxyInterface::r#is_alternate_for(self, payload)
    }
3146
    /// Get the buffer collection ID. This ID is also available from
    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
    /// within the collection).
    ///
    /// This call is mainly useful in situations where we can't convey a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
    /// handle, which can be joined back up with a `BufferCollection` client end
    /// that was created via a different path. Prefer to convey a
    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
    ///
    /// Trusting a `buffer_collection_id` value from a source other than sysmem
    /// is analogous to trusting a koid value from a source other than zircon.
    /// Both should be avoided unless really necessary, and both require
    /// caution. In some situations it may be reasonable to refer to a
    /// pre-established `BufferCollection` by `buffer_collection_id` via a
    /// protocol for efficiency reasons, but an incoming value purporting to be
    /// a `buffer_collection_id` is not sufficient alone to justify granting the
    /// sender of the `buffer_collection_id` any capability. The sender must
    /// first prove to a receiver that the sender has/had a VMO or has/had a
    /// `BufferCollectionToken` to the same collection by sending a handle that
    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
    /// `buffer_collection_id` value. The receiver should take care to avoid
    /// assuming that a sender had a `BufferCollectionToken` in cases where the
    /// sender has only proven that the sender had a VMO.
    ///
    /// - response `buffer_collection_id` This ID is unique per buffer
    ///   collection per boot. Each buffer is uniquely identified by the
    ///   `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
    ) -> fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Thin delegation to the `BufferCollectionProxyInterface` impl for
        // this proxy, which performs the actual FIDL encode/send/decode.
        BufferCollectionProxyInterface::r#get_buffer_collection_id(self)
    }
3184
    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
    /// created after this message to weak, which means that a client's `Node`
    /// client end (or a child created after this message) is not alone
    /// sufficient to keep allocated VMOs alive.
    ///
    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
    /// `close_weak_asap`.
    ///
    /// This message is only permitted before the `Node` becomes ready for
    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
    ///   * `BufferCollectionToken`: any time
    ///   * `BufferCollection`: before `SetConstraints`
    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
    ///
    /// Currently, no conversion from strong `Node` to weak `Node` after ready
    /// for allocation is provided, but a client can simulate that by creating
    /// an additional `Node` before allocation and setting that additional
    /// `Node` to weak, and then potentially at some point later sending
    /// `Release` and closing the client end of the client's strong `Node`, but
    /// keeping the client's weak `Node`.
    ///
    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
    /// collection failure (all `Node` client end(s) will see
    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
    /// this situation until all `Node`(s) are ready for allocation. For initial
    /// allocation to succeed, at least one strong `Node` is required to exist
    /// at allocation time, but after that client receives VMO handles, that
    /// client can `BufferCollection.Release` and close the client end without
    /// causing this type of failure.
    ///
    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
    /// separately as appropriate.
    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
        // Thin delegation to the `BufferCollectionProxyInterface` impl for
        // this proxy, which sends the one-way request.
        BufferCollectionProxyInterface::r#set_weak(self)
    }
3222
    /// This indicates to sysmem that the client is prepared to pay attention to
    /// `close_weak_asap`.
    ///
    /// If sent, this message must be before
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
    /// send this message before `WaitForAllBuffersAllocated`, or a parent
    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
    /// trigger buffer collection failure.
    ///
    /// This message is necessary because weak sysmem VMOs have not always been
    /// a thing, so older clients are not aware of the need to pay attention to
    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
    /// sysmem weak VMO handles asap. By having this message and requiring
    /// participants to indicate their acceptance of this aspect of the overall
    /// protocol, we avoid situations where an older client is delivered a weak
    /// VMO without any way for sysmem to get that VMO to close quickly later
    /// (and on a per-buffer basis).
    ///
    /// A participant that doesn't handle `close_weak_asap` and also doesn't
    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
    /// same participant has a child/delegate which does retrieve VMOs, that
    /// child/delegate will need to send `SetWeakOk` before
    /// `WaitForAllBuffersAllocated`.
    ///
    /// + request `for_child_nodes_also` If present and true, this means direct
    ///   child nodes of this node created after this message plus all
    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
    ///   those nodes. Any child node of this node that was created before this
    ///   message is not included. This setting is "sticky" in the sense that a
    ///   subsequent `SetWeakOk` without this bool set to true does not reset
    ///   the server-side bool. If this creates a problem for a participant, a
    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
    ///   tokens instead, as appropriate. A participant should only set
    ///   `for_child_nodes_also` true if the participant can really promise to
    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
    ///   weak VMO handles held by participants holding the corresponding child
    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
    ///   which are using sysmem(1) can be weak, despite the clients of those
    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
    ///   direct way to find out about `close_weak_asap`. This only applies to
    ///   descendents of this `Node` which are using sysmem(1), not to this
    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
    ///   token, which will fail allocation unless an ancestor of this `Node`
    ///   specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // Thin delegation to the `BufferCollectionProxyInterface` impl for
        // this proxy, which sends the one-way request.
        BufferCollectionProxyInterface::r#set_weak_ok(self, payload)
    }
3275
    /// The server_end will be closed after this `Node` and any child nodes have
    /// released their buffer counts, making those counts available for
    /// reservation by a different `Node` via
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
    ///
    /// The `Node` buffer counts may not be released until the entire tree of
    /// `Node`(s) is closed or failed, because
    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
    /// `Node` buffer counts remain reserved until the orphaned node is later
    /// cleaned up.
    ///
    /// If the `Node` exceeds a fairly large number of attached eventpair server
    /// ends, a log message will indicate this and the `Node` (and the
    /// appropriate) sub-tree will fail.
    ///
    /// The `server_end` will remain open when
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// This message can also be used with a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // Thin delegation to the `BufferCollectionProxyInterface` impl for
        // this proxy, which sends the one-way request.
        BufferCollectionProxyInterface::r#attach_node_tracking(self, payload)
    }
3305
    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
    /// collection.
    ///
    /// A participant may only call
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// For buffer allocation to be attempted, all holders of a
    /// `BufferCollection` client end need to call `SetConstraints` before
    /// sysmem will attempt to allocate buffers.
    ///
    /// + request `constraints` These are the constraints on the buffer
    ///   collection imposed by the sending client/participant.  The
    ///   `constraints` field is not required to be set. If not set, the client
    ///   is not setting any actual constraints, but is indicating that the
    ///   client has no constraints to set. A client that doesn't set the
    ///   `constraints` field won't receive any VMO handles, but can still find
    ///   out how many buffers were allocated and can still refer to buffers by
    ///   their `buffer_index`.
    pub fn r#set_constraints(
        &self,
        mut payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error> {
        // Thin delegation to the `BufferCollectionProxyInterface` impl for
        // this proxy, which sends the one-way request.
        BufferCollectionProxyInterface::r#set_constraints(self, payload)
    }
3331
    /// Wait until all buffers are allocated.
    ///
    /// This FIDL call completes when buffers have been allocated, or completes
    /// with some failure detail if allocation has been attempted but failed.
    ///
    /// The following must occur before buffers will be allocated:
    ///   * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
    ///     collection must be turned in via `BindSharedCollection` to get a
    ///     [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
    ///     [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
    ///     or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
    ///     to them.
    ///   * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
    ///     must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
    ///     sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
    ///     sent to them.
    ///
    /// - result `buffer_collection_info` The VMO handles and other related
    ///   info.
    /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
    ///   cannot be fulfilled due to resource exhaustion.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The request is
    ///   malformed.
    /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY]` The
    ///   request is valid but cannot be satisfied, perhaps due to hardware
    ///   limitations. This can happen if participants have incompatible
    ///   constraints (empty intersection, roughly speaking). See the log for
    ///   more info. In cases where a participant could potentially be treated
    ///   as optional, see [`BufferCollectionTokenGroup`]. When using
    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
    ///   error code if there aren't enough buffers in the pre-existing
    ///   collection to satisfy the constraints set on the attached token and
    ///   any sub-tree of tokens derived from the attached token.
    pub fn r#wait_for_all_buffers_allocated(
        &self,
    ) -> fidl::client::QueryResponseFut<
        BufferCollectionWaitForAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Thin delegation to the `BufferCollectionProxyInterface` impl for
        // this proxy, which performs the actual FIDL encode/send/decode.
        BufferCollectionProxyInterface::r#wait_for_all_buffers_allocated(self)
    }
3373
    /// Checks whether all the buffers have been allocated, in a polling
    /// fashion.
    ///
    /// * If the buffer collection has been allocated, returns success.
    /// * If the buffer collection failed allocation, returns the same
    ///   [`fuchsia.sysmem2/Error`] as
    ///   [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] would
    ///   return.
    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
    ///   would not respond quickly.
    pub fn r#check_all_buffers_allocated(
        &self,
    ) -> fidl::client::QueryResponseFut<
        BufferCollectionCheckAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Thin delegation to the `BufferCollectionProxyInterface` impl for
        // this proxy, which performs the actual FIDL encode/send/decode.
        BufferCollectionProxyInterface::r#check_all_buffers_allocated(self)
    }
3393
    /// Create a new token to add a new participant to an existing logical
    /// buffer collection, if the existing collection's buffer counts,
    /// constraints, and participants allow.
    ///
    /// This can be useful in replacing a failed participant, and/or in
    /// adding/re-adding a participant after buffers have already been
    /// allocated.
    ///
    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
    /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
    /// goes through the normal procedure of setting constraints or closing
    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
    /// clients' point of view, despite the possibility that all the buffers
    /// were actually allocated previously. This process is called "logical
    /// allocation". Most instances of "allocation" in docs for other messages
    /// can also be read as "allocation or logical allocation" while remaining
    /// valid, but we just say "allocation" in most places for brevity/clarity
    /// of explanation, with the details of "logical allocation" left for the
    /// docs here on `AttachToken`.
    ///
    /// Failure of an attached `Node` does not propagate to the parent of the
    /// attached `Node`. More generally, failure of a child `Node` is blocked
    /// from reaching its parent `Node` if the child is attached, or if the
    /// child is dispensable and the failure occurred after logical allocation
    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
    ///
    /// A participant may in some scenarios choose to initially use a
    /// dispensable token for a given instance of a delegate participant, and
    /// then later if the first instance of that delegate participant fails, a
    /// new second instance of that delegate participant may be given a token
    /// created with `AttachToken`.
    ///
    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
    /// client end, the token acts like any other token. The client can
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
    /// and can send the token to a different process/participant. The
    /// `BufferCollectionToken` `Node` should be converted to a
    /// `BufferCollection` `Node` as normal by sending
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
    /// without causing subtree failure by sending
    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
    /// the `BufferCollection`.
    ///
    /// Within the subtree, a success result from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
    /// the subtree participants' constraints were satisfiable using the
    /// already-existing buffer collection, the already-established
    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
    /// constraints, and the already-existing other participants (already added
    /// via successful logical allocation) and their specified buffer counts in
    /// their constraints. A failure result means the new participants'
    /// constraints cannot be satisfied using the existing buffer collection and
    /// its already-added participants. Creating a new collection instead may
    /// allow all participants' constraints to be satisfied, assuming
    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
    /// used.
    ///
    /// A token created with `AttachToken` performs constraints aggregation with
    /// all constraints currently in effect on the buffer collection, plus the
    /// attached token under consideration plus child tokens under the attached
    /// token which are not themselves an attached token or under such a token.
    /// Further subtrees under this subtree are considered for logical
    /// allocation only after this subtree has completed logical allocation.
    ///
    /// Assignment of existing buffers to participants'
    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
    /// etc is first-come first-served, but a child can't logically allocate
    /// before all its parents have sent `SetConstraints`.
    ///
    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
    /// in contrast to `AttachToken`, has the created token `Node` + child
    /// `Node`(s) (in the created subtree but not in any subtree under this
    /// subtree) participate in constraints aggregation along with its parent
    /// during the parent's allocation or logical allocation.
    ///
    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
    /// sysmem before the new token can be passed to `BindSharedCollection`. The
    /// `Sync` of the new token can be accomplished with
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
    /// created token, to also sync those additional tokens to sysmem using a
    /// single round-trip.
    ///
    /// All table fields are currently required.
    ///
    /// + request `rights_attenuation_mask` This allows attenuating the VMO
    ///   rights of the subtree. These values for `rights_attenuation_mask`
    ///   result in no attenuation (note that 0 is not on this list):
    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
    /// + request `token_request` The server end of the `BufferCollectionToken`
    ///   channel. The client retains the client end.
    pub fn r#attach_token(
        &self,
        mut payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error> {
        // Thin delegation to the `BufferCollectionProxyInterface` impl for
        // this proxy, which sends the one-way request.
        BufferCollectionProxyInterface::r#attach_token(self, payload)
    }
3499
    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
    /// buffers have been allocated and only the specified number of buffers (or
    /// fewer) remain in the buffer collection.
    ///
    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
    /// client to wait until an old buffer collection is fully or mostly
    /// deallocated before attempting allocation of a new buffer collection. The
    /// eventpair is only signalled when the buffers of this collection have
    /// been fully deallocated (not just un-referenced by clients, but all the
    /// memory consumed by those buffers has been fully reclaimed/recycled), or
    /// when allocation or logical allocation fails for the tree or subtree
    /// including this [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// The eventpair won't be signalled until allocation or logical allocation
    /// has completed; until then, the collection's current buffer count is
    /// ignored.
    ///
    /// If logical allocation fails for an attached subtree (using
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
    /// eventpair will close during that failure regardless of the number of
    /// buffers potentially allocated in the overall buffer collection. This is
    /// for logical allocation consistency with normal allocation.
    ///
    /// The lifetime signalled by this event includes asynchronous cleanup of
    /// allocated buffers, and this asynchronous cleanup cannot occur until all
    /// holders of VMO handles to the buffers have closed those VMO handles.
    /// Therefore, clients should take care not to become blocked forever
    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
    /// participants using the logical buffer collection (including the waiter
    /// itself) are less trusted, less reliable, or potentially blocked by the
    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
    /// for the client wait may be prudent, depending on details of how the
    /// collection and/or its VMOs are used or shared. Failure to allocate a
    /// new/replacement buffer collection is better than getting stuck forever.
    ///
    /// The sysmem server itself intentionally does not perform any waiting on
    /// already-failed collections' VMOs to finish cleaning up before attempting
    /// a new allocation, and the sysmem server intentionally doesn't retry
    /// allocation if a new allocation fails due to out of memory, even if that
    /// failure is potentially due to continued existence of an old collection's
    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
    /// as long as the waiting client is careful to not create a deadlock.
    ///
    /// Continued existence of old collections that are still cleaning up is not
    /// the only reason that a new allocation may fail due to insufficient
    /// memory, even if the new allocation is allocating physically contiguous
    /// buffers. Overall system memory pressure can also be the cause of failure
    /// to allocate a new collection. See also
    /// [`fuchsia.memorypressure/Provider`].
    ///
    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
    /// `eventpair` handle (server end) can be sent via more than one
    /// `AttachLifetimeTracking` message to different protocols, and the
    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
    /// the conditions are met (all holders of duplicates have closed their
    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
    /// client end can (also) be duplicated without preventing the
    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
    ///
    /// The server intentionally doesn't "trust" any signals set on the
    /// `server_end`. This mechanism intentionally uses only
    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
    /// "early", and is only set when all handles to the server end eventpair
    /// are closed. No meaning is associated with any of the other signals, and
    /// clients should ignore any other signal bits on either end of the
    /// `eventpair`.
    ///
    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
    /// transfer without causing `BufferCollection` channel failure).
    ///
    /// All table fields are currently required.
    ///
    /// + request `server_end` This eventpair handle will be closed by the
    ///   sysmem server when buffers have been allocated initially and the
    ///   number of buffers is then less than or equal to `buffers_remaining`.
    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
    ///   fewer) buffers to be fully deallocated. A number greater than zero can
    ///   be useful in situations where a known number of buffers are
    ///   intentionally not closed so that the data can continue to be used,
    ///   such as for keeping the last available video frame displayed in the UI
    ///   even if the video stream was using protected output buffers. It's
    ///   outside the scope of the `BufferCollection` interface (at least for
    ///   now) to determine how many buffers may be held without closing, but
    ///   it'll typically be in the range 0-2.
    pub fn r#attach_lifetime_tracking(
        &self,
        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // Thin delegation to the `BufferCollectionProxyInterface` impl for
        // this proxy, which sends the one-way request.
        BufferCollectionProxyInterface::r#attach_lifetime_tracking(self, payload)
    }
3593}
3594
impl BufferCollectionProxyInterface for BufferCollectionProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    // Two-way `Sync` (ordinal 0x11ac2555cf575b54): sends an empty payload and
    // decodes a flexible empty-struct response.
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the raw response buffer; invoked by the returned future.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `Release` (ordinal 0x6a5cae7d6d6e04c6): empty payload, no response.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetName` (ordinal 0xb41f1624f48c1e9); payload is borrowed (no handles moved).
    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugClientInfo` (ordinal 0x5cde8914608d99b1).
    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugTimeoutLogDeadline` (ordinal 0x716b0af13d5c0806).
    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetVerboseLogging` (ordinal 0x5209c77415b4dfad): empty payload.
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetNodeRef` (ordinal 0x5b3d0e51614df053).
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        // Decodes the raw response buffer; invoked by the returned future.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `IsAlternateFor` (ordinal 0x3a58e00157e0825); the response uses a
    // domain-error result type, so the decoded value is a `Result`.
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        // Decodes the raw response buffer; invoked by the returned future.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetBufferCollectionId` (ordinal 0x77d19a494b78ba8c).
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        // Decodes the raw response buffer; invoked by the returned future.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `SetWeak` (ordinal 0x22dd3ea514eeffe1): empty payload.
    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetWeakOk` (ordinal 0x38a44fc4d7724be9); payload taken by value.
    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `AttachNodeTracking` (ordinal 0x3f22f2a293d3cdac).
    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetConstraints` (ordinal 0x1fde0f19d650197b).
    fn r#set_constraints(
        &self,
        mut payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionSetConstraintsRequest>(
            &mut payload,
            0x1fde0f19d650197b,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type WaitForAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionWaitForAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `WaitForAllBuffersAllocated` (ordinal 0x62300344b61404e); the
    // response uses a domain-error result type, decoded as a `Result`.
    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut {
        // Decodes the raw response buffer; invoked by the returned future.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<
                    BufferCollectionWaitForAllBuffersAllocatedResponse,
                    Error,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x62300344b61404e,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            BufferCollectionWaitForAllBuffersAllocatedResult,
        >(
            (),
            0x62300344b61404e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type CheckAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionCheckAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `CheckAllBuffersAllocated` (ordinal 0x35a5fe77ce939c10); the
    // success payload is an empty struct, decoded into a `Result`.
    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut {
        // Decodes the raw response buffer; invoked by the returned future.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x35a5fe77ce939c10,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            BufferCollectionCheckAllBuffersAllocatedResult,
        >(
            (),
            0x35a5fe77ce939c10,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `AttachToken` (ordinal 0x46ac7d0008492982).
    fn r#attach_token(
        &self,
        mut payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionAttachTokenRequest>(
            &mut payload,
            0x46ac7d0008492982,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `AttachLifetimeTracking` (ordinal 0x3ecb510113116dcf).
    fn r#attach_lifetime_tracking(
        &self,
        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
            &mut payload,
            0x3ecb510113116dcf,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
3859
/// A Stream of incoming events for fuchsia.sysmem2/BufferCollection.
pub struct BufferCollectionEventStream {
    // Receives raw event message buffers from the underlying channel.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
3863
3864impl std::marker::Unpin for BufferCollectionEventStream {}
3865
impl futures::stream::FusedStream for BufferCollectionEventStream {
    // Terminated exactly when the underlying event receiver is terminated.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
3871
3872impl futures::Stream for BufferCollectionEventStream {
3873    type Item = Result<BufferCollectionEvent, fidl::Error>;
3874
3875    fn poll_next(
3876        mut self: std::pin::Pin<&mut Self>,
3877        cx: &mut std::task::Context<'_>,
3878    ) -> std::task::Poll<Option<Self::Item>> {
3879        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
3880            &mut self.event_receiver,
3881            cx
3882        )?) {
3883            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionEvent::decode(buf))),
3884            None => std::task::Poll::Ready(None),
3885        }
3886    }
3887}
3888
/// An event received on the fuchsia.sysmem2/BufferCollection protocol.
///
/// Only the flexible unknown-event variant is declared here; see
/// [`BufferCollectionEvent::decode`] for how incoming messages map onto it.
#[derive(Debug)]
pub enum BufferCollectionEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
3897
impl BufferCollectionEvent {
    /// Decodes a message buffer as a [`BufferCollectionEvent`].
    ///
    /// Flexible events with any ordinal become `_UnknownEvent`; strict
    /// (non-flexible) messages with an unrecognized ordinal are an error.
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<BufferCollectionEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events carry no transaction id (enforced in debug builds only).
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(BufferCollectionEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name:
                    <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
3918
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollection.
pub struct BufferCollectionRequestStream {
    // Shared serving state; also cloned into each control handle handed out.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set when the channel closes or shuts down; polling afterwards panics.
    is_terminated: bool,
}
3924
3925impl std::marker::Unpin for BufferCollectionRequestStream {}
3926
impl futures::stream::FusedStream for BufferCollectionRequestStream {
    // Reports the termination flag maintained by `poll_next`.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
3932
3933impl fidl::endpoints::RequestStream for BufferCollectionRequestStream {
3934    type Protocol = BufferCollectionMarker;
3935    type ControlHandle = BufferCollectionControlHandle;
3936
3937    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
3938        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
3939    }
3940
3941    fn control_handle(&self) -> Self::ControlHandle {
3942        BufferCollectionControlHandle { inner: self.inner.clone() }
3943    }
3944
3945    fn into_inner(
3946        self,
3947    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
3948    {
3949        (self.inner, self.is_terminated)
3950    }
3951
3952    fn from_inner(
3953        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
3954        is_terminated: bool,
3955    ) -> Self {
3956        Self { inner, is_terminated }
3957    }
3958}
3959
impl futures::Stream for BufferCollectionRequestStream {
    type Item = Result<BufferCollectionRequest, fidl::Error>;

    // Reads one message from the channel, validates its header, and dispatches
    // on the method ordinal to build the corresponding request variant.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled BufferCollectionRequestStream after completion");
        }
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure ends the stream cleanly rather than erroring.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Sync (two-way)
                    0x11ac2555cf575b54 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::Sync {
                            responder: BufferCollectionSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Release (one-way)
                    0x6a5cae7d6d6e04c6 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::Release { control_handle })
                    }
                    // SetName (one-way)
                    0xb41f1624f48c1e9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetNameRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetName { payload: req, control_handle })
                    }
                    // SetDebugClientInfo (one-way)
                    0x5cde8914608d99b1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetDebugClientInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetDebugClientInfo {
                            payload: req,
                            control_handle,
                        })
                    }
                    // SetDebugTimeoutLogDeadline (one-way)
                    0x716b0af13d5c0806 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetDebugTimeoutLogDeadlineRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetDebugTimeoutLogDeadline {
                            payload: req,
                            control_handle,
                        })
                    }
                    // SetVerboseLogging (one-way)
                    0x5209c77415b4dfad => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetVerboseLogging { control_handle })
                    }
                    // GetNodeRef (two-way)
                    0x5b3d0e51614df053 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::GetNodeRef {
                            responder: BufferCollectionGetNodeRefResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // IsAlternateFor (two-way)
                    0x3a58e00157e0825 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            NodeIsAlternateForRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::IsAlternateFor {
                            payload: req,
                            responder: BufferCollectionIsAlternateForResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetBufferCollectionId (two-way)
                    0x77d19a494b78ba8c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::GetBufferCollectionId {
                            responder: BufferCollectionGetBufferCollectionIdResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SetWeak (one-way)
                    0x22dd3ea514eeffe1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetWeak { control_handle })
                    }
                    // SetWeakOk (one-way)
                    0x38a44fc4d7724be9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetWeakOkRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetWeakOk { payload: req, control_handle })
                    }
                    // AttachNodeTracking (one-way)
                    0x3f22f2a293d3cdac => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeAttachNodeTrackingRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::AttachNodeTracking {
                            payload: req,
                            control_handle,
                        })
                    }
                    // SetConstraints (one-way)
                    0x1fde0f19d650197b => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            BufferCollectionSetConstraintsRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionSetConstraintsRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetConstraints { payload: req, control_handle })
                    }
                    // WaitForAllBuffersAllocated (two-way)
                    0x62300344b61404e => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::WaitForAllBuffersAllocated {
                            responder: BufferCollectionWaitForAllBuffersAllocatedResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // CheckAllBuffersAllocated (two-way)
                    0x35a5fe77ce939c10 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::CheckAllBuffersAllocated {
                            responder: BufferCollectionCheckAllBuffersAllocatedResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // AttachToken (one-way)
                    0x46ac7d0008492982 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            BufferCollectionAttachTokenRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachTokenRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::AttachToken { payload: req, control_handle })
                    }
                    // AttachLifetimeTracking (one-way)
                    0x3ecb510113116dcf => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            BufferCollectionAttachLifetimeTrackingRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachLifetimeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::AttachLifetimeTracking {
                            payload: req,
                            control_handle,
                        })
                    }
                    // Unknown flexible one-way method (tx_id == 0): surface it
                    // to the server as `_UnknownMethod` without replying.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(BufferCollectionRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionControlHandle {
                                inner: this.inner.clone(),
                            },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with a framework
                    // error before surfacing `_UnknownMethod`.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(BufferCollectionRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionControlHandle {
                                inner: this.inner.clone(),
                            },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
4267
4268/// [`fuchsia.sysmem2/BufferCollection`] is a connection directly from a
4269/// participant to sysmem re. a buffer collection; often the buffer collection
4270/// is shared with other participants which have their own `BufferCollection`
4271/// client end(s) associated with the same buffer collection.  In other words,
4272/// an instance of the `BufferCollection` interface is a view of a buffer
4273/// collection, not the buffer collection itself.
4274///
4275/// The `BufferCollection` connection exists to facilitate async indication of
4276/// when the buffer collection has been populated with buffers.
4277///
4278/// Also, the channel's closure by the sysmem server is an indication to the
4279/// client that the client should close all VMO handles that were obtained from
4280/// the `BufferCollection` ASAP.
4281///
4282/// Some buffer collections can use enough memory that it can be worth avoiding
4283/// allocation overlap (in time) using
4284/// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] so that the
4285/// initiator can tell when enough buffers of the buffer collection have been
4286/// fully deallocated prior to the initiator allocating a new buffer collection.
4287///
4288/// Epitaphs are not used in this protocol.
4289#[derive(Debug)]
4290pub enum BufferCollectionRequest {
4291    /// Ensure that previous messages have been received server side. This is
4292    /// particularly useful after previous messages that created new tokens,
4293    /// because a token must be known to the sysmem server before sending the
4294    /// token to another participant.
4295    ///
4296    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
4297    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
4298    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
4299    /// to mitigate the possibility of a hostile/fake
4300    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
4301    /// Another way is to pass the token to
4302    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
4303    /// the token as part of exchanging it for a
4304    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
4305    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
4306    /// of stalling.
4307    ///
4308    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
4309    /// and then starting and completing a `Sync`, it's then safe to send the
4310    /// `BufferCollectionToken` client ends to other participants knowing the
4311    /// server will recognize the tokens when they're sent by the other
4312    /// participants to sysmem in a
4313    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
4314    /// efficient way to create tokens while avoiding unnecessary round trips.
4315    ///
4316    /// Other options include waiting for each
4317    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
4318    /// individually (using separate call to `Sync` after each), or calling
4319    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
4320    /// converted to a `BufferCollection` via
4321    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
4322    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
4323    /// the sync step and can create multiple tokens at once.
4324    Sync { responder: BufferCollectionSyncResponder },
4325    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
4326    ///
4327    /// Normally a participant will convert a `BufferCollectionToken` into a
4328    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
4329    /// `Release` via the token (and then close the channel immediately or
4330    /// shortly later in response to server closing the server end), which
4331    /// avoids causing buffer collection failure. Without a prior `Release`,
4332    /// closing the `BufferCollectionToken` client end will cause buffer
4333    /// collection failure.
4334    ///
4335    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
4336    ///
4337    /// By default the server handles unexpected closure of a
4338    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
4339    /// first) by failing the buffer collection. Partly this is to expedite
4340    /// closing VMO handles to reclaim memory when any participant fails. If a
4341    /// participant would like to cleanly close a `BufferCollection` without
4342    /// causing buffer collection failure, the participant can send `Release`
4343    /// before closing the `BufferCollection` client end. The `Release` can
4344    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
4345    /// buffer collection won't require constraints from this node in order to
4346    /// allocate. If after `SetConstraints`, the constraints are retained and
4347    /// aggregated, despite the lack of `BufferCollection` connection at the
4348    /// time of constraints aggregation.
4349    ///
4350    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
4351    ///
4352    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
4353    /// end (without `Release` first) will trigger failure of the buffer
4354    /// collection. To close a `BufferCollectionTokenGroup` channel without
4355    /// failing the buffer collection, ensure that AllChildrenPresent() has been
4356    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
4357    /// client end.
4358    ///
4359    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
4361    /// buffer collection will fail (triggered by reception of `Release` without
4362    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
4363    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
4364    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
4365    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
4366    /// close requires `AllChildrenPresent` (if not already sent), then
4367    /// `Release`, then close client end.
4368    ///
4369    /// If `Release` occurs after `AllChildrenPresent`, the children and all
4370    /// their constraints remain intact (just as they would if the
4371    /// `BufferCollectionTokenGroup` channel had remained open), and the client
4372    /// end close doesn't trigger buffer collection failure.
4373    ///
4374    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
4375    ///
4376    /// For brevity, the per-channel-protocol paragraphs above ignore the
4377    /// separate failure domain created by
4378    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
4379    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
4380    /// unexpectedly closes (without `Release` first) and that client end is
4381    /// under a failure domain, instead of failing the whole buffer collection,
4382    /// the failure domain is failed, but the buffer collection itself is
4383    /// isolated from failure of the failure domain. Such failure domains can be
4384    /// nested, in which case only the inner-most failure domain in which the
4385    /// `Node` resides fails.
4386    Release { control_handle: BufferCollectionControlHandle },
4387    /// Set a name for VMOs in this buffer collection.
4388    ///
4389    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
4390    /// will be truncated to fit. The name of the vmo will be suffixed with the
4391    /// buffer index within the collection (if the suffix fits within
4392    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
4393    /// listed in the inspect data.
4394    ///
4395    /// The name only affects VMOs allocated after the name is set; this call
4396    /// does not rename existing VMOs. If multiple clients set different names
4397    /// then the larger priority value will win. Setting a new name with the
4398    /// same priority as a prior name doesn't change the name.
4399    ///
4400    /// All table fields are currently required.
4401    ///
4402    /// + request `priority` The name is only set if this is the first `SetName`
4403    ///   or if `priority` is greater than any previous `priority` value in
4404    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
4405    /// + request `name` The name for VMOs created under this buffer collection.
4406    SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionControlHandle },
4407    /// Set information about the current client that can be used by sysmem to
4408    /// help diagnose leaking memory and allocation stalls waiting for a
4409    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
4410    ///
4411    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
4413    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
4414    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
4415    ///
4416    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
4417    /// `Allocator` is the most efficient way to ensure that all
4418    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
4419    /// set, and is also more efficient than separately sending the same debug
4420    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
4421    /// created [`fuchsia.sysmem2/Node`].
4422    ///
4423    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
4424    /// indicate which client is closing their channel first, leading to subtree
4425    /// failure (which can be normal if the purpose of the subtree is over, but
4426    /// if happening earlier than expected, the client-channel-specific name can
4427    /// help diagnose where the failure is first coming from, from sysmem's
4428    /// point of view).
4429    ///
4430    /// All table fields are currently required.
4431    ///
4432    /// + request `name` This can be an arbitrary string, but the current
4433    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
4434    /// + request `id` This can be an arbitrary id, but the current process ID
4435    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
4436    SetDebugClientInfo {
4437        payload: NodeSetDebugClientInfoRequest,
4438        control_handle: BufferCollectionControlHandle,
4439    },
4440    /// Sysmem logs a warning if sysmem hasn't seen
4441    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
4442    /// within 5 seconds after creation of a new collection.
4443    ///
4444    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
4446    /// take effect.
4447    ///
4448    /// In most cases the default works well.
4449    ///
4450    /// All table fields are currently required.
4451    ///
4452    /// + request `deadline` The time at which sysmem will start trying to log
4453    ///   the warning, unless all constraints are with sysmem by then.
4454    SetDebugTimeoutLogDeadline {
4455        payload: NodeSetDebugTimeoutLogDeadlineRequest,
4456        control_handle: BufferCollectionControlHandle,
4457    },
4458    /// This enables verbose logging for the buffer collection.
4459    ///
4460    /// Verbose logging includes constraints set via
4461    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
4462    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
4463    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
4464    /// the tree of `Node`(s).
4465    ///
4466    /// Normally sysmem prints only a single line complaint when aggregation
4467    /// fails, with just the specific detailed reason that aggregation failed,
4468    /// with little surrounding context.  While this is often enough to diagnose
4469    /// a problem if only a small change was made and everything was working
4470    /// before the small change, it's often not particularly helpful for getting
4471    /// a new buffer collection to work for the first time.  Especially with
4472    /// more complex trees of nodes, involving things like
4473    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
4474    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
4475    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
4476    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
4477    /// looks like and why it's failing a logical allocation, or why a tree or
4478    /// subtree is failing sooner than expected.
4479    ///
4480    /// The intent of the extra logging is to be acceptable from a performance
4481    /// point of view, under the assumption that verbose logging is only enabled
4482    /// on a low number of buffer collections. If we're not tracking down a bug,
4483    /// we shouldn't send this message.
4484    SetVerboseLogging { control_handle: BufferCollectionControlHandle },
4485    /// This gets a handle that can be used as a parameter to
4486    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
4487    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
4488    /// client obtained this handle from this `Node`.
4489    ///
4490    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
4491    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
4492    /// despite the two calls typically being on different channels.
4493    ///
4494    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
4495    ///
4496    /// All table fields are currently required.
4497    ///
4498    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
4499    ///   different `Node` channel, to prove that the client obtained the handle
4500    ///   from this `Node`.
4501    GetNodeRef { responder: BufferCollectionGetNodeRefResponder },
4502    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
4503    /// rooted at a different child token of a common parent
4504    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
4505    /// passed-in `node_ref`.
4506    ///
4507    /// This call is for assisting with admission control de-duplication, and
4508    /// with debugging.
4509    ///
4510    /// The `node_ref` must be obtained using
4511    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
4512    ///
4513    /// The `node_ref` can be a duplicated handle; it's not necessary to call
4514    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
4515    ///
4516    /// If a calling token may not actually be a valid token at all due to a
4517    /// potentially hostile/untrusted provider of the token, call
4518    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
4519    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
4520    /// never responds due to a calling token not being a real token (not really
4521    /// talking to sysmem).  Another option is to call
4522    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
4523    /// which also validates the token along with converting it to a
4524    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
4525    ///
4526    /// All table fields are currently required.
4527    ///
4528    /// - response `is_alternate`
4529    ///   - true: The first parent node in common between the calling node and
4530    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
4531    ///     that the calling `Node` and the `node_ref` `Node` will not have both
4532    ///     their constraints apply - rather sysmem will choose one or the other
4533    ///     of the constraints - never both.  This is because only one child of
4534    ///     a `BufferCollectionTokenGroup` is selected during logical
4535    ///     allocation, with only that one child's subtree contributing to
4536    ///     constraints aggregation.
4537    ///   - false: The first parent node in common between the calling `Node`
4538    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
4539    ///     Currently, this means the first parent node in common is a
4540    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
4541    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
4542    ///     `Node` may have both their constraints apply during constraints
4543    ///     aggregation of the logical allocation, if both `Node`(s) are
4544    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
4545    ///     this case, there is no `BufferCollectionTokenGroup` that will
4546    ///     directly prevent the two `Node`(s) from both being selected and
4547    ///     their constraints both aggregated, but even when false, one or both
4548    ///     `Node`(s) may still be eliminated from consideration if one or both
4549    ///     `Node`(s) has a direct or indirect parent
4550    ///     `BufferCollectionTokenGroup` which selects a child subtree other
4551    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
4552    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
4553    ///   associated with the same buffer collection as the calling `Node`.
4554    ///   Another reason for this error is if the `node_ref` is an
4555    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
4556    ///   a real `node_ref` obtained from `GetNodeRef`.
4557    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
4559    ///   the needed rights expected on a real `node_ref`.
4560    /// * No other failing status codes are returned by this call.  However,
4561    ///   sysmem may add additional codes in future, so the client should have
4562    ///   sensible default handling for any failing status code.
4563    IsAlternateFor {
4564        payload: NodeIsAlternateForRequest,
4565        responder: BufferCollectionIsAlternateForResponder,
4566    },
4567    /// Get the buffer collection ID. This ID is also available from
4568    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
4569    /// within the collection).
4570    ///
4571    /// This call is mainly useful in situations where we can't convey a
4572    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
4573    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
4574    /// handle, which can be joined back up with a `BufferCollection` client end
4575    /// that was created via a different path. Prefer to convey a
4576    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
4577    ///
4578    /// Trusting a `buffer_collection_id` value from a source other than sysmem
4579    /// is analogous to trusting a koid value from a source other than zircon.
4580    /// Both should be avoided unless really necessary, and both require
4581    /// caution. In some situations it may be reasonable to refer to a
4582    /// pre-established `BufferCollection` by `buffer_collection_id` via a
4583    /// protocol for efficiency reasons, but an incoming value purporting to be
4584    /// a `buffer_collection_id` is not sufficient alone to justify granting the
4585    /// sender of the `buffer_collection_id` any capability. The sender must
4586    /// first prove to a receiver that the sender has/had a VMO or has/had a
4587    /// `BufferCollectionToken` to the same collection by sending a handle that
4588    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
4589    /// `buffer_collection_id` value. The receiver should take care to avoid
4590    /// assuming that a sender had a `BufferCollectionToken` in cases where the
4591    /// sender has only proven that the sender had a VMO.
4592    ///
4593    /// - response `buffer_collection_id` This ID is unique per buffer
4594    ///   collection per boot. Each buffer is uniquely identified by the
4595    ///   `buffer_collection_id` and `buffer_index` together.
4596    GetBufferCollectionId { responder: BufferCollectionGetBufferCollectionIdResponder },
4597    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
4598    /// created after this message to weak, which means that a client's `Node`
4599    /// client end (or a child created after this message) is not alone
4600    /// sufficient to keep allocated VMOs alive.
4601    ///
4602    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
4603    /// `close_weak_asap`.
4604    ///
4605    /// This message is only permitted before the `Node` becomes ready for
4606    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
4607    ///   * `BufferCollectionToken`: any time
4608    ///   * `BufferCollection`: before `SetConstraints`
4609    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
4610    ///
4611    /// Currently, no conversion from strong `Node` to weak `Node` after ready
4612    /// for allocation is provided, but a client can simulate that by creating
4613    /// an additional `Node` before allocation and setting that additional
4614    /// `Node` to weak, and then potentially at some point later sending
4615    /// `Release` and closing the client end of the client's strong `Node`, but
4616    /// keeping the client's weak `Node`.
4617    ///
4618    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
4619    /// collection failure (all `Node` client end(s) will see
4620    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
4621    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
4622    /// this situation until all `Node`(s) are ready for allocation. For initial
4623    /// allocation to succeed, at least one strong `Node` is required to exist
4624    /// at allocation time, but after that client receives VMO handles, that
4625    /// client can `BufferCollection.Release` and close the client end without
4626    /// causing this type of failure.
4627    ///
4628    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
4629    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
4630    /// separately as appropriate.
4631    SetWeak { control_handle: BufferCollectionControlHandle },
4632    /// This indicates to sysmem that the client is prepared to pay attention to
4633    /// `close_weak_asap`.
4634    ///
4635    /// If sent, this message must be before
4636    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
4637    ///
4638    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
4639    /// send this message before `WaitForAllBuffersAllocated`, or a parent
4640    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
4641    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
4642    /// trigger buffer collection failure.
4643    ///
4644    /// This message is necessary because weak sysmem VMOs have not always been
4645    /// a thing, so older clients are not aware of the need to pay attention to
4646    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
4647    /// sysmem weak VMO handles asap. By having this message and requiring
4648    /// participants to indicate their acceptance of this aspect of the overall
4649    /// protocol, we avoid situations where an older client is delivered a weak
4650    /// VMO without any way for sysmem to get that VMO to close quickly later
4651    /// (and on a per-buffer basis).
4652    ///
4653    /// A participant that doesn't handle `close_weak_asap` and also doesn't
4654    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
4655    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
4656    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
4657    /// same participant has a child/delegate which does retrieve VMOs, that
4658    /// child/delegate will need to send `SetWeakOk` before
4659    /// `WaitForAllBuffersAllocated`.
4660    ///
4661    /// + request `for_child_nodes_also` If present and true, this means direct
4662    ///   child nodes of this node created after this message plus all
4663    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
4664    ///   those nodes. Any child node of this node that was created before this
4665    ///   message is not included. This setting is "sticky" in the sense that a
4666    ///   subsequent `SetWeakOk` without this bool set to true does not reset
4667    ///   the server-side bool. If this creates a problem for a participant, a
4668    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
4669    ///   tokens instead, as appropriate. A participant should only set
4670    ///   `for_child_nodes_also` true if the participant can really promise to
4671    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
4672    ///   weak VMO handles held by participants holding the corresponding child
4673    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
4674    ///   which are using sysmem(1) can be weak, despite the clients of those
4675    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
4676    ///   direct way to find out about `close_weak_asap`. This only applies to
4677    ///   descendents of this `Node` which are using sysmem(1), not to this
4678    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
4679    ///   token, which will fail allocation unless an ancestor of this `Node`
4680    ///   specified `for_child_nodes_also` true.
4681    SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionControlHandle },
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
4684    /// reservation by a different `Node` via
4685    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
4686    ///
4687    /// The `Node` buffer counts may not be released until the entire tree of
4688    /// `Node`(s) is closed or failed, because
4689    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
4690    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
4691    /// `Node` buffer counts remain reserved until the orphaned node is later
4692    /// cleaned up.
4693    ///
4694    /// If the `Node` exceeds a fairly large number of attached eventpair server
4695    /// ends, a log message will indicate this and the `Node` (and the
4696    /// appropriate) sub-tree will fail.
4697    ///
4698    /// The `server_end` will remain open when
4699    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
4700    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
4701    /// [`fuchsia.sysmem2/BufferCollection`].
4702    ///
4703    /// This message can also be used with a
4704    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
4705    AttachNodeTracking {
4706        payload: NodeAttachNodeTrackingRequest,
4707        control_handle: BufferCollectionControlHandle,
4708    },
4709    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
4710    /// collection.
4711    ///
4712    /// A participant may only call
4713    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
4714    /// [`fuchsia.sysmem2/BufferCollection`].
4715    ///
4716    /// For buffer allocation to be attempted, all holders of a
4717    /// `BufferCollection` client end need to call `SetConstraints` before
4718    /// sysmem will attempt to allocate buffers.
4719    ///
4720    /// + request `constraints` These are the constraints on the buffer
4721    ///   collection imposed by the sending client/participant.  The
4722    ///   `constraints` field is not required to be set. If not set, the client
4723    ///   is not setting any actual constraints, but is indicating that the
4724    ///   client has no constraints to set. A client that doesn't set the
4725    ///   `constraints` field won't receive any VMO handles, but can still find
4726    ///   out how many buffers were allocated and can still refer to buffers by
4727    ///   their `buffer_index`.
4728    SetConstraints {
4729        payload: BufferCollectionSetConstraintsRequest,
4730        control_handle: BufferCollectionControlHandle,
4731    },
4732    /// Wait until all buffers are allocated.
4733    ///
4734    /// This FIDL call completes when buffers have been allocated, or completes
4735    /// with some failure detail if allocation has been attempted but failed.
4736    ///
4737    /// The following must occur before buffers will be allocated:
4738    ///   * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
4739    ///     collection must be turned in via `BindSharedCollection` to get a
4740    ///     [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
4741    ///     [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
4742    ///     or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
4743    ///     to them.
4744    ///   * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
4745    ///     must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
4746    ///     sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
4747    ///     sent to them.
4748    ///
4749    /// - result `buffer_collection_info` The VMO handles and other related
4750    ///   info.
4751    /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
4752    ///   cannot be fulfilled due to resource exhaustion.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The request is
4754    ///   malformed.
    /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY]` The
4756    ///   request is valid but cannot be satisfied, perhaps due to hardware
4757    ///   limitations. This can happen if participants have incompatible
4758    ///   constraints (empty intersection, roughly speaking). See the log for
4759    ///   more info. In cases where a participant could potentially be treated
4760    ///   as optional, see [`BufferCollectionTokenGroup`]. When using
4761    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
4762    ///   error code if there aren't enough buffers in the pre-existing
4763    ///   collection to satisfy the constraints set on the attached token and
4764    ///   any sub-tree of tokens derived from the attached token.
4765    WaitForAllBuffersAllocated { responder: BufferCollectionWaitForAllBuffersAllocatedResponder },
4766    /// Checks whether all the buffers have been allocated, in a polling
4767    /// fashion.
4768    ///
4769    /// * If the buffer collection has been allocated, returns success.
4770    /// * If the buffer collection failed allocation, returns the same
4771    ///   [`fuchsia.sysmem2/Error`] as
4772    ///   [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
4773    ///   return.
4774    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
4775    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
4776    ///   would not respond quickly.
4777    CheckAllBuffersAllocated { responder: BufferCollectionCheckAllBuffersAllocatedResponder },
4778    /// Create a new token to add a new participant to an existing logical
4779    /// buffer collection, if the existing collection's buffer counts,
4780    /// constraints, and participants allow.
4781    ///
4782    /// This can be useful in replacing a failed participant, and/or in
4783    /// adding/re-adding a participant after buffers have already been
4784    /// allocated.
4785    ///
4786    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
4787    /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
4788    /// goes through the normal procedure of setting constraints or closing
4789    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
4790    /// clients' point of view, despite the possibility that all the buffers
4791    /// were actually allocated previously. This process is called "logical
4792    /// allocation". Most instances of "allocation" in docs for other messages
4793    /// can also be read as "allocation or logical allocation" while remaining
4794    /// valid, but we just say "allocation" in most places for brevity/clarity
4795    /// of explanation, with the details of "logical allocation" left for the
4796    /// docs here on `AttachToken`.
4797    ///
4798    /// Failure of an attached `Node` does not propagate to the parent of the
4799    /// attached `Node`. More generally, failure of a child `Node` is blocked
4800    /// from reaching its parent `Node` if the child is attached, or if the
4801    /// child is dispensable and the failure occurred after logical allocation
4802    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
4803    ///
4804    /// A participant may in some scenarios choose to initially use a
4805    /// dispensable token for a given instance of a delegate participant, and
4806    /// then later if the first instance of that delegate participant fails, a
4807    /// new second instance of that delegate participant my be given a token
4808    /// created with `AttachToken`.
4809    ///
4810    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
4811    /// client end, the token acts like any other token. The client can
4812    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
4813    /// and can send the token to a different process/participant. The
4814    /// `BufferCollectionToken` `Node` should be converted to a
4815    /// `BufferCollection` `Node` as normal by sending
4816    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
4817    /// without causing subtree failure by sending
4818    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
4819    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
4820    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
4821    /// the `BufferCollection`.
4822    ///
4823    /// Within the subtree, a success result from
4824    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
4825    /// the subtree participants' constraints were satisfiable using the
4826    /// already-existing buffer collection, the already-established
4827    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
4828    /// constraints, and the already-existing other participants (already added
4829    /// via successful logical allocation) and their specified buffer counts in
4830    /// their constraints. A failure result means the new participants'
4831    /// constraints cannot be satisfied using the existing buffer collection and
4832    /// its already-added participants. Creating a new collection instead may
4833    /// allow all participants' constraints to be satisfied, assuming
4834    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
4835    /// used.
4836    ///
4837    /// A token created with `AttachToken` performs constraints aggregation with
4838    /// all constraints currently in effect on the buffer collection, plus the
4839    /// attached token under consideration plus child tokens under the attached
4840    /// token which are not themselves an attached token or under such a token.
4841    /// Further subtrees under this subtree are considered for logical
4842    /// allocation only after this subtree has completed logical allocation.
4843    ///
4844    /// Assignment of existing buffers to participants'
4845    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
4846    /// etc is first-come first-served, but a child can't logically allocate
4847    /// before all its parents have sent `SetConstraints`.
4848    ///
4849    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
4850    /// in contrast to `AttachToken`, has the created token `Node` + child
4851    /// `Node`(s) (in the created subtree but not in any subtree under this
4852    /// subtree) participate in constraints aggregation along with its parent
4853    /// during the parent's allocation or logical allocation.
4854    ///
4855    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
4856    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
4857    /// sysmem before the new token can be passed to `BindSharedCollection`. The
4858    /// `Sync` of the new token can be accomplished with
4859    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
4860    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
4861    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
4862    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
4863    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
4864    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
4865    /// created token, to also sync those additional tokens to sysmem using a
4866    /// single round-trip.
4867    ///
4868    /// All table fields are currently required.
4869    ///
4870    /// + request `rights_attentuation_mask` This allows attenuating the VMO
4871    ///   rights of the subtree. These values for `rights_attenuation_mask`
4872    ///   result in no attenuation (note that 0 is not on this list):
4873    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
4874    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
4875    /// + request `token_request` The server end of the `BufferCollectionToken`
4876    ///   channel. The client retains the client end.
4877    AttachToken {
4878        payload: BufferCollectionAttachTokenRequest,
4879        control_handle: BufferCollectionControlHandle,
4880    },
4881    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
4882    /// buffers have been allocated and only the specified number of buffers (or
4883    /// fewer) remain in the buffer collection.
4884    ///
4885    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
4886    /// client to wait until an old buffer collection is fully or mostly
4887    /// deallocated before attempting allocation of a new buffer collection. The
4888    /// eventpair is only signalled when the buffers of this collection have
4889    /// been fully deallocated (not just un-referenced by clients, but all the
4890    /// memory consumed by those buffers has been fully reclaimed/recycled), or
4891    /// when allocation or logical allocation fails for the tree or subtree
4892    /// including this [`fuchsia.sysmem2/BufferCollection`].
4893    ///
4894    /// The eventpair won't be signalled until allocation or logical allocation
4895    /// has completed; until then, the collection's current buffer count is
4896    /// ignored.
4897    ///
4898    /// If logical allocation fails for an attached subtree (using
4899    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
4900    /// eventpair will close during that failure regardless of the number of
4901    /// buffers potenitally allocated in the overall buffer collection. This is
4902    /// for logical allocation consistency with normal allocation.
4903    ///
4904    /// The lifetime signalled by this event includes asynchronous cleanup of
4905    /// allocated buffers, and this asynchronous cleanup cannot occur until all
4906    /// holders of VMO handles to the buffers have closed those VMO handles.
4907    /// Therefore, clients should take care not to become blocked forever
4908    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
4909    /// participants using the logical buffer collection (including the waiter
4910    /// itself) are less trusted, less reliable, or potentially blocked by the
4911    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
4912    /// for the client wait may be prudent, depending on details of how the
4913    /// collection and/or its VMOs are used or shared. Failure to allocate a
4914    /// new/replacement buffer collection is better than getting stuck forever.
4915    ///
4916    /// The sysmem server itself intentionally does not perform any waiting on
4917    /// already-failed collections' VMOs to finish cleaning up before attempting
4918    /// a new allocation, and the sysmem server intentionally doesn't retry
4919    /// allocation if a new allocation fails due to out of memory, even if that
4920    /// failure is potentially due to continued existence of an old collection's
4921    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
4922    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
4923    /// as long as the waiting client is careful to not create a deadlock.
4924    ///
4925    /// Continued existence of old collections that are still cleaning up is not
4926    /// the only reason that a new allocation may fail due to insufficient
4927    /// memory, even if the new allocation is allocating physically contiguous
4928    /// buffers. Overall system memory pressure can also be the cause of failure
4929    /// to allocate a new collection. See also
4930    /// [`fuchsia.memorypressure/Provider`].
4931    ///
4932    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
4933    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
4934    /// `eventpair` handle (server end) can be sent via more than one
4935    /// `AttachLifetimeTracking` message to different protocols, and the
4936    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
4937    /// the conditions are met (all holders of duplicates have closed their
4938    /// server end handle(s)). Also, thanks to how eventpair endponts work, the
4939    /// client end can (also) be duplicated without preventing the
4940    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
4941    ///
4942    /// The server intentionally doesn't "trust" any signals set on the
4943    /// `server_end`. This mechanism intentionally uses only
4944    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
4945    /// "early", and is only set when all handles to the server end eventpair
4946    /// are closed. No meaning is associated with any of the other signals, and
4947    /// clients should ignore any other signal bits on either end of the
4948    /// `eventpair`.
4949    ///
4950    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
4951    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
4952    /// transfer without causing `BufferCollection` channel failure).
4953    ///
4954    /// All table fields are currently required.
4955    ///
4956    /// + request `server_end` This eventpair handle will be closed by the
4957    ///   sysmem server when buffers have been allocated initially and the
4958    ///   number of buffers is then less than or equal to `buffers_remaining`.
4959    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
4960    ///   fewer) buffers to be fully deallocated. A number greater than zero can
4961    ///   be useful in situations where a known number of buffers are
4962    ///   intentionally not closed so that the data can continue to be used,
4963    ///   such as for keeping the last available video frame displayed in the UI
4964    ///   even if the video stream was using protected output buffers. It's
4965    ///   outside the scope of the `BufferCollection` interface (at least for
4966    ///   now) to determine how many buffers may be held without closing, but
4967    ///   it'll typically be in the range 0-2.
4968    AttachLifetimeTracking {
4969        payload: BufferCollectionAttachLifetimeTrackingRequest,
4970        control_handle: BufferCollectionControlHandle,
4971    },
4972    /// An interaction was received which does not match any known method.
4973    #[non_exhaustive]
4974    _UnknownMethod {
4975        /// Ordinal of the method that was called.
4976        ordinal: u64,
4977        control_handle: BufferCollectionControlHandle,
4978        method_type: fidl::MethodType,
4979    },
4980}
4981
4982impl BufferCollectionRequest {
4983    #[allow(irrefutable_let_patterns)]
4984    pub fn into_sync(self) -> Option<(BufferCollectionSyncResponder)> {
4985        if let BufferCollectionRequest::Sync { responder } = self {
4986            Some((responder))
4987        } else {
4988            None
4989        }
4990    }
4991
4992    #[allow(irrefutable_let_patterns)]
4993    pub fn into_release(self) -> Option<(BufferCollectionControlHandle)> {
4994        if let BufferCollectionRequest::Release { control_handle } = self {
4995            Some((control_handle))
4996        } else {
4997            None
4998        }
4999    }
5000
5001    #[allow(irrefutable_let_patterns)]
5002    pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionControlHandle)> {
5003        if let BufferCollectionRequest::SetName { payload, control_handle } = self {
5004            Some((payload, control_handle))
5005        } else {
5006            None
5007        }
5008    }
5009
5010    #[allow(irrefutable_let_patterns)]
5011    pub fn into_set_debug_client_info(
5012        self,
5013    ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionControlHandle)> {
5014        if let BufferCollectionRequest::SetDebugClientInfo { payload, control_handle } = self {
5015            Some((payload, control_handle))
5016        } else {
5017            None
5018        }
5019    }
5020
5021    #[allow(irrefutable_let_patterns)]
5022    pub fn into_set_debug_timeout_log_deadline(
5023        self,
5024    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionControlHandle)> {
5025        if let BufferCollectionRequest::SetDebugTimeoutLogDeadline { payload, control_handle } =
5026            self
5027        {
5028            Some((payload, control_handle))
5029        } else {
5030            None
5031        }
5032    }
5033
5034    #[allow(irrefutable_let_patterns)]
5035    pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionControlHandle)> {
5036        if let BufferCollectionRequest::SetVerboseLogging { control_handle } = self {
5037            Some((control_handle))
5038        } else {
5039            None
5040        }
5041    }
5042
5043    #[allow(irrefutable_let_patterns)]
5044    pub fn into_get_node_ref(self) -> Option<(BufferCollectionGetNodeRefResponder)> {
5045        if let BufferCollectionRequest::GetNodeRef { responder } = self {
5046            Some((responder))
5047        } else {
5048            None
5049        }
5050    }
5051
5052    #[allow(irrefutable_let_patterns)]
5053    pub fn into_is_alternate_for(
5054        self,
5055    ) -> Option<(NodeIsAlternateForRequest, BufferCollectionIsAlternateForResponder)> {
5056        if let BufferCollectionRequest::IsAlternateFor { payload, responder } = self {
5057            Some((payload, responder))
5058        } else {
5059            None
5060        }
5061    }
5062
5063    #[allow(irrefutable_let_patterns)]
5064    pub fn into_get_buffer_collection_id(
5065        self,
5066    ) -> Option<(BufferCollectionGetBufferCollectionIdResponder)> {
5067        if let BufferCollectionRequest::GetBufferCollectionId { responder } = self {
5068            Some((responder))
5069        } else {
5070            None
5071        }
5072    }
5073
5074    #[allow(irrefutable_let_patterns)]
5075    pub fn into_set_weak(self) -> Option<(BufferCollectionControlHandle)> {
5076        if let BufferCollectionRequest::SetWeak { control_handle } = self {
5077            Some((control_handle))
5078        } else {
5079            None
5080        }
5081    }
5082
5083    #[allow(irrefutable_let_patterns)]
5084    pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, BufferCollectionControlHandle)> {
5085        if let BufferCollectionRequest::SetWeakOk { payload, control_handle } = self {
5086            Some((payload, control_handle))
5087        } else {
5088            None
5089        }
5090    }
5091
5092    #[allow(irrefutable_let_patterns)]
5093    pub fn into_attach_node_tracking(
5094        self,
5095    ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionControlHandle)> {
5096        if let BufferCollectionRequest::AttachNodeTracking { payload, control_handle } = self {
5097            Some((payload, control_handle))
5098        } else {
5099            None
5100        }
5101    }
5102
5103    #[allow(irrefutable_let_patterns)]
5104    pub fn into_set_constraints(
5105        self,
5106    ) -> Option<(BufferCollectionSetConstraintsRequest, BufferCollectionControlHandle)> {
5107        if let BufferCollectionRequest::SetConstraints { payload, control_handle } = self {
5108            Some((payload, control_handle))
5109        } else {
5110            None
5111        }
5112    }
5113
5114    #[allow(irrefutable_let_patterns)]
5115    pub fn into_wait_for_all_buffers_allocated(
5116        self,
5117    ) -> Option<(BufferCollectionWaitForAllBuffersAllocatedResponder)> {
5118        if let BufferCollectionRequest::WaitForAllBuffersAllocated { responder } = self {
5119            Some((responder))
5120        } else {
5121            None
5122        }
5123    }
5124
5125    #[allow(irrefutable_let_patterns)]
5126    pub fn into_check_all_buffers_allocated(
5127        self,
5128    ) -> Option<(BufferCollectionCheckAllBuffersAllocatedResponder)> {
5129        if let BufferCollectionRequest::CheckAllBuffersAllocated { responder } = self {
5130            Some((responder))
5131        } else {
5132            None
5133        }
5134    }
5135
5136    #[allow(irrefutable_let_patterns)]
5137    pub fn into_attach_token(
5138        self,
5139    ) -> Option<(BufferCollectionAttachTokenRequest, BufferCollectionControlHandle)> {
5140        if let BufferCollectionRequest::AttachToken { payload, control_handle } = self {
5141            Some((payload, control_handle))
5142        } else {
5143            None
5144        }
5145    }
5146
5147    #[allow(irrefutable_let_patterns)]
5148    pub fn into_attach_lifetime_tracking(
5149        self,
5150    ) -> Option<(BufferCollectionAttachLifetimeTrackingRequest, BufferCollectionControlHandle)>
5151    {
5152        if let BufferCollectionRequest::AttachLifetimeTracking { payload, control_handle } = self {
5153            Some((payload, control_handle))
5154        } else {
5155            None
5156        }
5157    }
5158
5159    /// Name of the method defined in FIDL
5160    pub fn method_name(&self) -> &'static str {
5161        match *self {
5162            BufferCollectionRequest::Sync { .. } => "sync",
5163            BufferCollectionRequest::Release { .. } => "release",
5164            BufferCollectionRequest::SetName { .. } => "set_name",
5165            BufferCollectionRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
5166            BufferCollectionRequest::SetDebugTimeoutLogDeadline { .. } => {
5167                "set_debug_timeout_log_deadline"
5168            }
5169            BufferCollectionRequest::SetVerboseLogging { .. } => "set_verbose_logging",
5170            BufferCollectionRequest::GetNodeRef { .. } => "get_node_ref",
5171            BufferCollectionRequest::IsAlternateFor { .. } => "is_alternate_for",
5172            BufferCollectionRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
5173            BufferCollectionRequest::SetWeak { .. } => "set_weak",
5174            BufferCollectionRequest::SetWeakOk { .. } => "set_weak_ok",
5175            BufferCollectionRequest::AttachNodeTracking { .. } => "attach_node_tracking",
5176            BufferCollectionRequest::SetConstraints { .. } => "set_constraints",
5177            BufferCollectionRequest::WaitForAllBuffersAllocated { .. } => {
5178                "wait_for_all_buffers_allocated"
5179            }
5180            BufferCollectionRequest::CheckAllBuffersAllocated { .. } => {
5181                "check_all_buffers_allocated"
5182            }
5183            BufferCollectionRequest::AttachToken { .. } => "attach_token",
5184            BufferCollectionRequest::AttachLifetimeTracking { .. } => "attach_lifetime_tracking",
5185            BufferCollectionRequest::_UnknownMethod {
5186                method_type: fidl::MethodType::OneWay,
5187                ..
5188            } => "unknown one-way method",
5189            BufferCollectionRequest::_UnknownMethod {
5190                method_type: fidl::MethodType::TwoWay,
5191                ..
5192            } => "unknown two-way method",
5193        }
5194    }
5195}
5196
/// Server-side control handle for a `BufferCollection` channel.
///
/// Cloning is cheap: every clone shares the same underlying `ServeInner`
/// through the `Arc`, so all clones refer to the same channel.
#[derive(Debug, Clone)]
pub struct BufferCollectionControlHandle {
    // Shared connection state; responders also reach the channel through this
    // (see the responder `send_raw` methods below).
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
5201
impl fidl::endpoints::ControlHandle for BufferCollectionControlHandle {
    /// Shuts down the channel without sending an epitaph.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    /// Shuts down the channel after sending `status` as an epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    /// Reports whether the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    /// Returns a future-like signal reference that resolves when the channel
    /// closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    /// Signals the peer endpoint of the channel (Fuchsia targets only).
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
5227
// NOTE(review): generated inherent impl, empty here — presumably the slot
// where fidlgen emits protocol event senders, and this protocol defines none
// in this direction; confirm against the FIDL definition.
impl BufferCollectionControlHandle {}
5229
/// Responder for the two-way `BufferCollection.Sync` method.
///
/// Dropping this without calling `send` shuts down the channel (see the
/// `Drop` impl); use `drop_without_shutdown` to opt out of that behavior.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionSyncResponder {
    // ManuallyDrop lets `send`/`drop_without_shutdown` release the handle
    // exactly once while still allowing a custom `Drop`.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5236
/// Sets the channel to be shut down (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5247
impl fidl::endpoints::Responder for BufferCollectionSyncResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Returns the control handle for the channel this responder answers on.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the shutdown-on-drop
    /// behavior implemented in `Drop`.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5262
5263impl BufferCollectionSyncResponder {
5264    /// Sends a response to the FIDL transaction.
5265    ///
5266    /// Sets the channel to shutdown if an error occurs.
5267    pub fn send(self) -> Result<(), fidl::Error> {
5268        let _result = self.send_raw();
5269        if _result.is_err() {
5270            self.control_handle.shutdown();
5271        }
5272        self.drop_without_shutdown();
5273        _result
5274    }
5275
5276    /// Similar to "send" but does not shutdown the channel if an error occurs.
5277    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
5278        let _result = self.send_raw();
5279        self.drop_without_shutdown();
5280        _result
5281    }
5282
5283    fn send_raw(&self) -> Result<(), fidl::Error> {
5284        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
5285            fidl::encoding::Flexible::new(()),
5286            self.tx_id,
5287            0x11ac2555cf575b54,
5288            fidl::encoding::DynamicFlags::FLEXIBLE,
5289        )
5290    }
5291}
5292
/// Responder for the two-way `BufferCollection.GetNodeRef` method.
///
/// Dropping this without calling `send` shuts down the channel (see the
/// `Drop` impl); use `drop_without_shutdown` to opt out of that behavior.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionGetNodeRefResponder {
    // ManuallyDrop lets `send`/`drop_without_shutdown` release the handle
    // exactly once while still allowing a custom `Drop`.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5299
/// Sets the channel to be shut down (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5310
impl fidl::endpoints::Responder for BufferCollectionGetNodeRefResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Returns the control handle for the channel this responder answers on.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the shutdown-on-drop
    /// behavior implemented in `Drop`.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5325
5326impl BufferCollectionGetNodeRefResponder {
5327    /// Sends a response to the FIDL transaction.
5328    ///
5329    /// Sets the channel to shutdown if an error occurs.
5330    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5331        let _result = self.send_raw(payload);
5332        if _result.is_err() {
5333            self.control_handle.shutdown();
5334        }
5335        self.drop_without_shutdown();
5336        _result
5337    }
5338
5339    /// Similar to "send" but does not shutdown the channel if an error occurs.
5340    pub fn send_no_shutdown_on_err(
5341        self,
5342        mut payload: NodeGetNodeRefResponse,
5343    ) -> Result<(), fidl::Error> {
5344        let _result = self.send_raw(payload);
5345        self.drop_without_shutdown();
5346        _result
5347    }
5348
5349    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5350        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
5351            fidl::encoding::Flexible::new(&mut payload),
5352            self.tx_id,
5353            0x5b3d0e51614df053,
5354            fidl::encoding::DynamicFlags::FLEXIBLE,
5355        )
5356    }
5357}
5358
/// Responder for the two-way `BufferCollection.IsAlternateFor` method.
///
/// Dropping this without calling `send` shuts down the channel (see the
/// `Drop` impl); use `drop_without_shutdown` to opt out of that behavior.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionIsAlternateForResponder {
    // ManuallyDrop lets `send`/`drop_without_shutdown` release the handle
    // exactly once while still allowing a custom `Drop`.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5365
/// Sets the channel to be shut down (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5376
impl fidl::endpoints::Responder for BufferCollectionIsAlternateForResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Returns the control handle for the channel this responder answers on.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the shutdown-on-drop
    /// behavior implemented in `Drop`.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5391
5392impl BufferCollectionIsAlternateForResponder {
5393    /// Sends a response to the FIDL transaction.
5394    ///
5395    /// Sets the channel to shutdown if an error occurs.
5396    pub fn send(
5397        self,
5398        mut result: Result<&NodeIsAlternateForResponse, Error>,
5399    ) -> Result<(), fidl::Error> {
5400        let _result = self.send_raw(result);
5401        if _result.is_err() {
5402            self.control_handle.shutdown();
5403        }
5404        self.drop_without_shutdown();
5405        _result
5406    }
5407
5408    /// Similar to "send" but does not shutdown the channel if an error occurs.
5409    pub fn send_no_shutdown_on_err(
5410        self,
5411        mut result: Result<&NodeIsAlternateForResponse, Error>,
5412    ) -> Result<(), fidl::Error> {
5413        let _result = self.send_raw(result);
5414        self.drop_without_shutdown();
5415        _result
5416    }
5417
5418    fn send_raw(
5419        &self,
5420        mut result: Result<&NodeIsAlternateForResponse, Error>,
5421    ) -> Result<(), fidl::Error> {
5422        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5423            NodeIsAlternateForResponse,
5424            Error,
5425        >>(
5426            fidl::encoding::FlexibleResult::new(result),
5427            self.tx_id,
5428            0x3a58e00157e0825,
5429            fidl::encoding::DynamicFlags::FLEXIBLE,
5430        )
5431    }
5432}
5433
/// Responder for the `BufferCollection.GetBufferCollectionId` method; use
/// `send` (or `send_no_shutdown_on_err`) to reply to the transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionGetBufferCollectionIdResponder {
    // ManuallyDrop so Drop / drop_without_shutdown can control exactly when
    // (and whether) the handle's drop logic runs.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5440
/// Sets the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5451
/// Responder plumbing for `BufferCollection.GetBufferCollectionId` transactions.
impl fidl::endpoints::Responder for BufferCollectionGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Returns the control handle for the channel this transaction arrived on.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without shutting down the
    /// channel (skips the shutdown performed by the `Drop` impl).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5466
5467impl BufferCollectionGetBufferCollectionIdResponder {
5468    /// Sends a response to the FIDL transaction.
5469    ///
5470    /// Sets the channel to shutdown if an error occurs.
5471    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
5472        let _result = self.send_raw(payload);
5473        if _result.is_err() {
5474            self.control_handle.shutdown();
5475        }
5476        self.drop_without_shutdown();
5477        _result
5478    }
5479
5480    /// Similar to "send" but does not shutdown the channel if an error occurs.
5481    pub fn send_no_shutdown_on_err(
5482        self,
5483        mut payload: &NodeGetBufferCollectionIdResponse,
5484    ) -> Result<(), fidl::Error> {
5485        let _result = self.send_raw(payload);
5486        self.drop_without_shutdown();
5487        _result
5488    }
5489
5490    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
5491        self.control_handle
5492            .inner
5493            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
5494                fidl::encoding::Flexible::new(payload),
5495                self.tx_id,
5496                0x77d19a494b78ba8c,
5497                fidl::encoding::DynamicFlags::FLEXIBLE,
5498            )
5499    }
5500}
5501
/// Responder for the `BufferCollection.WaitForAllBuffersAllocated` method; use
/// `send` (or `send_no_shutdown_on_err`) to reply to the transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionWaitForAllBuffersAllocatedResponder {
    // ManuallyDrop so Drop / drop_without_shutdown can control exactly when
    // (and whether) the handle's drop logic runs.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5508
/// Sets the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionWaitForAllBuffersAllocatedResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5519
/// Responder plumbing for `BufferCollection.WaitForAllBuffersAllocated`
/// transactions.
impl fidl::endpoints::Responder for BufferCollectionWaitForAllBuffersAllocatedResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Returns the control handle for the channel this transaction arrived on.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without shutting down the
    /// channel (skips the shutdown performed by the `Drop` impl).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5534
5535impl BufferCollectionWaitForAllBuffersAllocatedResponder {
5536    /// Sends a response to the FIDL transaction.
5537    ///
5538    /// Sets the channel to shutdown if an error occurs.
5539    pub fn send(
5540        self,
5541        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5542    ) -> Result<(), fidl::Error> {
5543        let _result = self.send_raw(result);
5544        if _result.is_err() {
5545            self.control_handle.shutdown();
5546        }
5547        self.drop_without_shutdown();
5548        _result
5549    }
5550
5551    /// Similar to "send" but does not shutdown the channel if an error occurs.
5552    pub fn send_no_shutdown_on_err(
5553        self,
5554        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5555    ) -> Result<(), fidl::Error> {
5556        let _result = self.send_raw(result);
5557        self.drop_without_shutdown();
5558        _result
5559    }
5560
5561    fn send_raw(
5562        &self,
5563        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5564    ) -> Result<(), fidl::Error> {
5565        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5566            BufferCollectionWaitForAllBuffersAllocatedResponse,
5567            Error,
5568        >>(
5569            fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
5570            self.tx_id,
5571            0x62300344b61404e,
5572            fidl::encoding::DynamicFlags::FLEXIBLE,
5573        )
5574    }
5575}
5576
/// Responder for the `BufferCollection.CheckAllBuffersAllocated` method; use
/// `send` (or `send_no_shutdown_on_err`) to reply to the transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionCheckAllBuffersAllocatedResponder {
    // ManuallyDrop so Drop / drop_without_shutdown can control exactly when
    // (and whether) the handle's drop logic runs.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5583
/// Sets the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionCheckAllBuffersAllocatedResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5594
/// Responder plumbing for `BufferCollection.CheckAllBuffersAllocated`
/// transactions.
impl fidl::endpoints::Responder for BufferCollectionCheckAllBuffersAllocatedResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Returns the control handle for the channel this transaction arrived on.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without shutting down the
    /// channel (skips the shutdown performed by the `Drop` impl).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5609
5610impl BufferCollectionCheckAllBuffersAllocatedResponder {
5611    /// Sends a response to the FIDL transaction.
5612    ///
5613    /// Sets the channel to shutdown if an error occurs.
5614    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5615        let _result = self.send_raw(result);
5616        if _result.is_err() {
5617            self.control_handle.shutdown();
5618        }
5619        self.drop_without_shutdown();
5620        _result
5621    }
5622
5623    /// Similar to "send" but does not shutdown the channel if an error occurs.
5624    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5625        let _result = self.send_raw(result);
5626        self.drop_without_shutdown();
5627        _result
5628    }
5629
5630    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5631        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5632            fidl::encoding::EmptyStruct,
5633            Error,
5634        >>(
5635            fidl::encoding::FlexibleResult::new(result),
5636            self.tx_id,
5637            0x35a5fe77ce939c10,
5638            fidl::encoding::DynamicFlags::FLEXIBLE,
5639        )
5640    }
5641}
5642
/// Marker type identifying the `fuchsia.sysmem2/BufferCollectionToken`
/// protocol (see the `ProtocolMarker` impl below for the associated types).
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionTokenMarker;
5645
impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenMarker {
    type Proxy = BufferCollectionTokenProxy;
    type RequestStream = BufferCollectionTokenRequestStream;
    // Synchronous proxies only exist when building for a Fuchsia target.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionTokenSynchronousProxy;

    // Name used by the bindings when reporting errors for this protocol.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionToken";
}
5654
/// Client-side interface for the `BufferCollectionToken` protocol.
///
/// One-way methods return `Result<(), fidl::Error>` directly; two-way methods
/// return an associated `*ResponseFut` future that resolves to the decoded
/// response.
pub trait BufferCollectionTokenProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    type DuplicateSyncResponseFut: std::future::Future<
            Output = Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error>,
        > + Send;
    fn r#duplicate_sync(
        &self,
        payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> Self::DuplicateSyncResponseFut;
    fn r#duplicate(
        &self,
        payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_dispensable(&self) -> Result<(), fidl::Error>;
    fn r#create_buffer_collection_token_group(
        &self,
        payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error>;
}
/// Blocking (synchronous) client proxy for `BufferCollectionToken`.
/// Only available when targeting Fuchsia.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionTokenSynchronousProxy {
    // Underlying synchronous FIDL client that owns the channel.
    client: fidl::client::sync::Client,
}
5709
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenSynchronousProxy {
    type Proxy = BufferCollectionTokenProxy;
    type Protocol = BufferCollectionTokenMarker;

    /// Wraps `inner` in a new synchronous proxy (delegates to `Self::new`).
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    /// Consumes the proxy, returning the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
5727
5728#[cfg(target_os = "fuchsia")]
5729impl BufferCollectionTokenSynchronousProxy {
5730    pub fn new(channel: fidl::Channel) -> Self {
5731        let protocol_name =
5732            <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
5733        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
5734    }
5735
    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }
5739
    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
        // Reads the next event message (waiting up to `deadline`) and decodes
        // it into the typed event enum.
        BufferCollectionTokenEvent::decode(self.client.wait_for_event(deadline)?)
    }
5748
5749    /// Ensure that previous messages have been received server side. This is
5750    /// particularly useful after previous messages that created new tokens,
5751    /// because a token must be known to the sysmem server before sending the
5752    /// token to another participant.
5753    ///
5754    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
5755    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
5756    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
5757    /// to mitigate the possibility of a hostile/fake
5758    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
5759    /// Another way is to pass the token to
5760    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
5761    /// the token as part of exchanging it for a
5762    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
5763    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
5764    /// of stalling.
5765    ///
5766    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
5767    /// and then starting and completing a `Sync`, it's then safe to send the
5768    /// `BufferCollectionToken` client ends to other participants knowing the
5769    /// server will recognize the tokens when they're sent by the other
5770    /// participants to sysmem in a
5771    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
5772    /// efficient way to create tokens while avoiding unnecessary round trips.
5773    ///
5774    /// Other options include waiting for each
5775    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
5776    /// individually (using separate call to `Sync` after each), or calling
5777    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
5778    /// converted to a `BufferCollection` via
5779    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
5780    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
5781    /// the sync step and can create multiple tokens at once.
    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
        // Two-way call: empty request payload, flexible empty-struct reply,
        // waiting up to `___deadline` for the response.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
        >(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        // Unwrap the flexible envelope (surfaces unknown-interaction errors).
        .into_result::<BufferCollectionTokenMarker>("sync")?;
        Ok(_response)
    }
5795
5796    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
5797    ///
5798    /// Normally a participant will convert a `BufferCollectionToken` into a
5799    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
5800    /// `Release` via the token (and then close the channel immediately or
5801    /// shortly later in response to server closing the server end), which
5802    /// avoids causing buffer collection failure. Without a prior `Release`,
5803    /// closing the `BufferCollectionToken` client end will cause buffer
5804    /// collection failure.
5805    ///
5806    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
5807    ///
5808    /// By default the server handles unexpected closure of a
5809    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
5810    /// first) by failing the buffer collection. Partly this is to expedite
5811    /// closing VMO handles to reclaim memory when any participant fails. If a
5812    /// participant would like to cleanly close a `BufferCollection` without
5813    /// causing buffer collection failure, the participant can send `Release`
5814    /// before closing the `BufferCollection` client end. The `Release` can
5815    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
5816    /// buffer collection won't require constraints from this node in order to
5817    /// allocate. If after `SetConstraints`, the constraints are retained and
5818    /// aggregated, despite the lack of `BufferCollection` connection at the
5819    /// time of constraints aggregation.
5820    ///
5821    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
5822    ///
5823    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
5824    /// end (without `Release` first) will trigger failure of the buffer
5825    /// collection. To close a `BufferCollectionTokenGroup` channel without
5826    /// failing the buffer collection, ensure that AllChildrenPresent() has been
5827    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
5828    /// client end.
5829    ///
5830    /// If `Release` occurs before
5831    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
5832    /// buffer collection will fail (triggered by reception of `Release` without
5833    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
5834    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
5835    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
5836    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
5837    /// close requires `AllChildrenPresent` (if not already sent), then
5838    /// `Release`, then close client end.
5839    ///
5840    /// If `Release` occurs after `AllChildrenPresent`, the children and all
5841    /// their constraints remain intact (just as they would if the
5842    /// `BufferCollectionTokenGroup` channel had remained open), and the client
5843    /// end close doesn't trigger buffer collection failure.
5844    ///
5845    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
5846    ///
5847    /// For brevity, the per-channel-protocol paragraphs above ignore the
5848    /// separate failure domain created by
5849    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
5850    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
5851    /// unexpectedly closes (without `Release` first) and that client end is
5852    /// under a failure domain, instead of failing the whole buffer collection,
5853    /// the failure domain is failed, but the buffer collection itself is
5854    /// isolated from failure of the failure domain. Such failure domains can be
5855    /// nested, in which case only the inner-most failure domain in which the
5856    /// `Node` resides fails.
5857    pub fn r#release(&self) -> Result<(), fidl::Error> {
5858        self.client.send::<fidl::encoding::EmptyPayload>(
5859            (),
5860            0x6a5cae7d6d6e04c6,
5861            fidl::encoding::DynamicFlags::FLEXIBLE,
5862        )
5863    }
5864
5865    /// Set a name for VMOs in this buffer collection.
5866    ///
5867    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
5868    /// will be truncated to fit. The name of the vmo will be suffixed with the
5869    /// buffer index within the collection (if the suffix fits within
5870    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
5871    /// listed in the inspect data.
5872    ///
5873    /// The name only affects VMOs allocated after the name is set; this call
5874    /// does not rename existing VMOs. If multiple clients set different names
5875    /// then the larger priority value will win. Setting a new name with the
5876    /// same priority as a prior name doesn't change the name.
5877    ///
5878    /// All table fields are currently required.
5879    ///
5880    /// + request `priority` The name is only set if this is the first `SetName`
5881    ///   or if `priority` is greater than any previous `priority` value in
5882    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
5883    /// + request `name` The name for VMOs created under this buffer collection.
5884    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
5885        self.client.send::<NodeSetNameRequest>(
5886            payload,
5887            0xb41f1624f48c1e9,
5888            fidl::encoding::DynamicFlags::FLEXIBLE,
5889        )
5890    }
5891
5892    /// Set information about the current client that can be used by sysmem to
5893    /// help diagnose leaking memory and allocation stalls waiting for a
5894    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
5895    ///
5896    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
5897    /// `Node`(s) derived from this `Node`, unless overriden by
5898    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
5899    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
5900    ///
5901    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
5902    /// `Allocator` is the most efficient way to ensure that all
5903    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
5904    /// set, and is also more efficient than separately sending the same debug
5905    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
5906    /// created [`fuchsia.sysmem2/Node`].
5907    ///
5908    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
5909    /// indicate which client is closing their channel first, leading to subtree
5910    /// failure (which can be normal if the purpose of the subtree is over, but
5911    /// if happening earlier than expected, the client-channel-specific name can
5912    /// help diagnose where the failure is first coming from, from sysmem's
5913    /// point of view).
5914    ///
5915    /// All table fields are currently required.
5916    ///
5917    /// + request `name` This can be an arbitrary string, but the current
5918    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
5919    /// + request `id` This can be an arbitrary id, but the current process ID
5920    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
5921    pub fn r#set_debug_client_info(
5922        &self,
5923        mut payload: &NodeSetDebugClientInfoRequest,
5924    ) -> Result<(), fidl::Error> {
5925        self.client.send::<NodeSetDebugClientInfoRequest>(
5926            payload,
5927            0x5cde8914608d99b1,
5928            fidl::encoding::DynamicFlags::FLEXIBLE,
5929        )
5930    }
5931
5932    /// Sysmem logs a warning if sysmem hasn't seen
5933    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
5934    /// within 5 seconds after creation of a new collection.
5935    ///
5936    /// Clients can call this method to change when the log is printed. If
5937    /// multiple client set the deadline, it's unspecified which deadline will
5938    /// take effect.
5939    ///
5940    /// In most cases the default works well.
5941    ///
5942    /// All table fields are currently required.
5943    ///
5944    /// + request `deadline` The time at which sysmem will start trying to log
5945    ///   the warning, unless all constraints are with sysmem by then.
5946    pub fn r#set_debug_timeout_log_deadline(
5947        &self,
5948        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
5949    ) -> Result<(), fidl::Error> {
5950        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
5951            payload,
5952            0x716b0af13d5c0806,
5953            fidl::encoding::DynamicFlags::FLEXIBLE,
5954        )
5955    }
5956
5957    /// This enables verbose logging for the buffer collection.
5958    ///
5959    /// Verbose logging includes constraints set via
5960    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
5961    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
5962    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
5963    /// the tree of `Node`(s).
5964    ///
5965    /// Normally sysmem prints only a single line complaint when aggregation
5966    /// fails, with just the specific detailed reason that aggregation failed,
5967    /// with little surrounding context.  While this is often enough to diagnose
5968    /// a problem if only a small change was made and everything was working
5969    /// before the small change, it's often not particularly helpful for getting
5970    /// a new buffer collection to work for the first time.  Especially with
5971    /// more complex trees of nodes, involving things like
5972    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
5973    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
5974    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
5975    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
5976    /// looks like and why it's failing a logical allocation, or why a tree or
5977    /// subtree is failing sooner than expected.
5978    ///
5979    /// The intent of the extra logging is to be acceptable from a performance
5980    /// point of view, under the assumption that verbose logging is only enabled
5981    /// on a low number of buffer collections. If we're not tracking down a bug,
5982    /// we shouldn't send this message.
5983    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
5984        self.client.send::<fidl::encoding::EmptyPayload>(
5985            (),
5986            0x5209c77415b4dfad,
5987            fidl::encoding::DynamicFlags::FLEXIBLE,
5988        )
5989    }
5990
5991    /// This gets a handle that can be used as a parameter to
5992    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
5993    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
5994    /// client obtained this handle from this `Node`.
5995    ///
5996    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
5997    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
5998    /// despite the two calls typically being on different channels.
5999    ///
6000    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
6001    ///
6002    /// All table fields are currently required.
6003    ///
6004    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
6005    ///   different `Node` channel, to prove that the client obtained the handle
6006    ///   from this `Node`.
    pub fn r#get_node_ref(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
        // Two-way call: empty request payload, flexible
        // `NodeGetNodeRefResponse` reply, waiting up to `___deadline`.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
        >(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        // Unwrap the flexible envelope (surfaces unknown-interaction errors).
        .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
        Ok(_response)
    }
6023
6024    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
6025    /// rooted at a different child token of a common parent
6026    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
6027    /// passed-in `node_ref`.
6028    ///
6029    /// This call is for assisting with admission control de-duplication, and
6030    /// with debugging.
6031    ///
6032    /// The `node_ref` must be obtained using
6033    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
6034    ///
6035    /// The `node_ref` can be a duplicated handle; it's not necessary to call
6036    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
6037    ///
6038    /// If a calling token may not actually be a valid token at all due to a
6039    /// potentially hostile/untrusted provider of the token, call
6040    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6041    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
6042    /// never responds due to a calling token not being a real token (not really
6043    /// talking to sysmem).  Another option is to call
6044    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
6045    /// which also validates the token along with converting it to a
6046    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
6047    ///
6048    /// All table fields are currently required.
6049    ///
6050    /// - response `is_alternate`
6051    ///   - true: The first parent node in common between the calling node and
6052    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
6053    ///     that the calling `Node` and the `node_ref` `Node` will not have both
6054    ///     their constraints apply - rather sysmem will choose one or the other
6055    ///     of the constraints - never both.  This is because only one child of
6056    ///     a `BufferCollectionTokenGroup` is selected during logical
6057    ///     allocation, with only that one child's subtree contributing to
6058    ///     constraints aggregation.
6059    ///   - false: The first parent node in common between the calling `Node`
6060    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
6061    ///     Currently, this means the first parent node in common is a
6062    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
6063    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
6064    ///     `Node` may have both their constraints apply during constraints
6065    ///     aggregation of the logical allocation, if both `Node`(s) are
6066    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
6067    ///     this case, there is no `BufferCollectionTokenGroup` that will
6068    ///     directly prevent the two `Node`(s) from both being selected and
6069    ///     their constraints both aggregated, but even when false, one or both
6070    ///     `Node`(s) may still be eliminated from consideration if one or both
6071    ///     `Node`(s) has a direct or indirect parent
6072    ///     `BufferCollectionTokenGroup` which selects a child subtree other
6073    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
6074    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
6075    ///   associated with the same buffer collection as the calling `Node`.
6076    ///   Another reason for this error is if the `node_ref` is an
6077    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
6078    ///   a real `node_ref` obtained from `GetNodeRef`.
6079    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
6081    ///   the needed rights expected on a real `node_ref`.
6082    /// * No other failing status codes are returned by this call.  However,
6083    ///   sysmem may add additional codes in future, so the client should have
6084    ///   sensible default handling for any failing status code.
6085    pub fn r#is_alternate_for(
6086        &self,
6087        mut payload: NodeIsAlternateForRequest,
6088        ___deadline: zx::MonotonicInstant,
6089    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
6090        let _response = self.client.send_query::<
6091            NodeIsAlternateForRequest,
6092            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
6093        >(
6094            &mut payload,
6095            0x3a58e00157e0825,
6096            fidl::encoding::DynamicFlags::FLEXIBLE,
6097            ___deadline,
6098        )?
6099        .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
6100        Ok(_response.map(|x| x))
6101    }
6102
6103    /// Get the buffer collection ID. This ID is also available from
6104    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
6105    /// within the collection).
6106    ///
6107    /// This call is mainly useful in situations where we can't convey a
6108    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
6109    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
6110    /// handle, which can be joined back up with a `BufferCollection` client end
6111    /// that was created via a different path. Prefer to convey a
6112    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
6113    ///
6114    /// Trusting a `buffer_collection_id` value from a source other than sysmem
6115    /// is analogous to trusting a koid value from a source other than zircon.
6116    /// Both should be avoided unless really necessary, and both require
6117    /// caution. In some situations it may be reasonable to refer to a
6118    /// pre-established `BufferCollection` by `buffer_collection_id` via a
6119    /// protocol for efficiency reasons, but an incoming value purporting to be
6120    /// a `buffer_collection_id` is not sufficient alone to justify granting the
6121    /// sender of the `buffer_collection_id` any capability. The sender must
6122    /// first prove to a receiver that the sender has/had a VMO or has/had a
6123    /// `BufferCollectionToken` to the same collection by sending a handle that
6124    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
6125    /// `buffer_collection_id` value. The receiver should take care to avoid
6126    /// assuming that a sender had a `BufferCollectionToken` in cases where the
6127    /// sender has only proven that the sender had a VMO.
6128    ///
6129    /// - response `buffer_collection_id` This ID is unique per buffer
6130    ///   collection per boot. Each buffer is uniquely identified by the
6131    ///   `buffer_collection_id` and `buffer_index` together.
6132    pub fn r#get_buffer_collection_id(
6133        &self,
6134        ___deadline: zx::MonotonicInstant,
6135    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
6136        let _response = self.client.send_query::<
6137            fidl::encoding::EmptyPayload,
6138            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
6139        >(
6140            (),
6141            0x77d19a494b78ba8c,
6142            fidl::encoding::DynamicFlags::FLEXIBLE,
6143            ___deadline,
6144        )?
6145        .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
6146        Ok(_response)
6147    }
6148
6149    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
6150    /// created after this message to weak, which means that a client's `Node`
6151    /// client end (or a child created after this message) is not alone
6152    /// sufficient to keep allocated VMOs alive.
6153    ///
6154    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
6155    /// `close_weak_asap`.
6156    ///
6157    /// This message is only permitted before the `Node` becomes ready for
6158    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
6159    ///   * `BufferCollectionToken`: any time
6160    ///   * `BufferCollection`: before `SetConstraints`
6161    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
6162    ///
6163    /// Currently, no conversion from strong `Node` to weak `Node` after ready
6164    /// for allocation is provided, but a client can simulate that by creating
6165    /// an additional `Node` before allocation and setting that additional
6166    /// `Node` to weak, and then potentially at some point later sending
6167    /// `Release` and closing the client end of the client's strong `Node`, but
6168    /// keeping the client's weak `Node`.
6169    ///
6170    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
6171    /// collection failure (all `Node` client end(s) will see
6172    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
6173    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
6174    /// this situation until all `Node`(s) are ready for allocation. For initial
6175    /// allocation to succeed, at least one strong `Node` is required to exist
6176    /// at allocation time, but after that client receives VMO handles, that
6177    /// client can `BufferCollection.Release` and close the client end without
6178    /// causing this type of failure.
6179    ///
6180    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
6181    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
6182    /// separately as appropriate.
6183    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
6184        self.client.send::<fidl::encoding::EmptyPayload>(
6185            (),
6186            0x22dd3ea514eeffe1,
6187            fidl::encoding::DynamicFlags::FLEXIBLE,
6188        )
6189    }
6190
6191    /// This indicates to sysmem that the client is prepared to pay attention to
6192    /// `close_weak_asap`.
6193    ///
6194    /// If sent, this message must be before
6195    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
6196    ///
6197    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
6198    /// send this message before `WaitForAllBuffersAllocated`, or a parent
6199    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
6200    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
6201    /// trigger buffer collection failure.
6202    ///
6203    /// This message is necessary because weak sysmem VMOs have not always been
6204    /// a thing, so older clients are not aware of the need to pay attention to
6205    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
6206    /// sysmem weak VMO handles asap. By having this message and requiring
6207    /// participants to indicate their acceptance of this aspect of the overall
6208    /// protocol, we avoid situations where an older client is delivered a weak
6209    /// VMO without any way for sysmem to get that VMO to close quickly later
6210    /// (and on a per-buffer basis).
6211    ///
6212    /// A participant that doesn't handle `close_weak_asap` and also doesn't
6213    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
6214    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
6215    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
6216    /// same participant has a child/delegate which does retrieve VMOs, that
6217    /// child/delegate will need to send `SetWeakOk` before
6218    /// `WaitForAllBuffersAllocated`.
6219    ///
6220    /// + request `for_child_nodes_also` If present and true, this means direct
6221    ///   child nodes of this node created after this message plus all
6222    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
6223    ///   those nodes. Any child node of this node that was created before this
6224    ///   message is not included. This setting is "sticky" in the sense that a
6225    ///   subsequent `SetWeakOk` without this bool set to true does not reset
6226    ///   the server-side bool. If this creates a problem for a participant, a
6227    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
6228    ///   tokens instead, as appropriate. A participant should only set
6229    ///   `for_child_nodes_also` true if the participant can really promise to
6230    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
6231    ///   weak VMO handles held by participants holding the corresponding child
6232    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
6233    ///   which are using sysmem(1) can be weak, despite the clients of those
6234    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
6235    ///   direct way to find out about `close_weak_asap`. This only applies to
6236    ///   descendents of this `Node` which are using sysmem(1), not to this
6237    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
6238    ///   token, which will fail allocation unless an ancestor of this `Node`
6239    ///   specified `for_child_nodes_also` true.
6240    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
6241        self.client.send::<NodeSetWeakOkRequest>(
6242            &mut payload,
6243            0x38a44fc4d7724be9,
6244            fidl::encoding::DynamicFlags::FLEXIBLE,
6245        )
6246    }
6247
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
6250    /// reservation by a different `Node` via
6251    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
6252    ///
6253    /// The `Node` buffer counts may not be released until the entire tree of
6254    /// `Node`(s) is closed or failed, because
6255    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
6256    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
6257    /// `Node` buffer counts remain reserved until the orphaned node is later
6258    /// cleaned up.
6259    ///
6260    /// If the `Node` exceeds a fairly large number of attached eventpair server
6261    /// ends, a log message will indicate this and the `Node` (and the
6262    /// appropriate) sub-tree will fail.
6263    ///
6264    /// The `server_end` will remain open when
6265    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
6266    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
6267    /// [`fuchsia.sysmem2/BufferCollection`].
6268    ///
6269    /// This message can also be used with a
6270    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
6271    pub fn r#attach_node_tracking(
6272        &self,
6273        mut payload: NodeAttachNodeTrackingRequest,
6274    ) -> Result<(), fidl::Error> {
6275        self.client.send::<NodeAttachNodeTrackingRequest>(
6276            &mut payload,
6277            0x3f22f2a293d3cdac,
6278            fidl::encoding::DynamicFlags::FLEXIBLE,
6279        )
6280    }
6281
6282    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
6283    /// one, referring to the same buffer collection.
6284    ///
6285    /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
6287    ///
6288    /// This method can be used to add more participants, by transferring the
6289    /// newly created tokens to additional participants.
6290    ///
6291    /// A new token will be returned for each entry in the
6292    /// `rights_attenuation_masks` array.
6293    ///
6294    /// If the called token may not actually be a valid token due to a
6295    /// potentially hostile/untrusted provider of the token, consider using
6296    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6297    /// instead of potentially getting stuck indefinitely if
6298    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
6299    /// due to the calling token not being a real token.
6300    ///
6301    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
6302    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
6303    /// method, because the sync step is included in this call, at the cost of a
6304    /// round trip during this call.
6305    ///
6306    /// All tokens must be turned in to sysmem via
6307    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6308    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6309    /// successfully allocate buffers (or to logically allocate buffers in the
6310    /// case of subtrees involving
6311    /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
6312    ///
6313    /// All table fields are currently required.
6314    ///
6315    /// + request `rights_attenuation_mask` In each entry of
6316    ///   `rights_attenuation_masks`, rights bits that are zero will be absent
6317    ///   in the buffer VMO rights obtainable via the corresponding returned
6318    ///   token. This allows an initiator or intermediary participant to
6319    ///   attenuate the rights available to a participant. This does not allow a
6320    ///   participant to gain rights that the participant doesn't already have.
6321    ///   The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
6322    ///   attenuation should be applied.
6323    /// - response `tokens` The client ends of each newly created token.
6324    pub fn r#duplicate_sync(
6325        &self,
6326        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
6327        ___deadline: zx::MonotonicInstant,
6328    ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
6329        let _response = self.client.send_query::<
6330            BufferCollectionTokenDuplicateSyncRequest,
6331            fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
6332        >(
6333            payload,
6334            0x1c1af9919d1ca45c,
6335            fidl::encoding::DynamicFlags::FLEXIBLE,
6336            ___deadline,
6337        )?
6338        .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
6339        Ok(_response)
6340    }
6341
6342    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
6343    /// one, referring to the same buffer collection.
6344    ///
6345    /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
6347    ///
6348    /// This method can be used to add a participant, by transferring the newly
6349    /// created token to another participant.
6350    ///
6351    /// This one-way message can be used instead of the two-way
6352    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
6354    /// sysmem to respond to
6355    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
6356    /// client code isn't structured to make it easy to duplicate all the needed
6357    /// tokens at once.
6358    ///
6359    /// After sending one or more `Duplicate` messages, and before sending the
6360    /// newly created child tokens to other participants (or to other
6361    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
6362    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
6363    /// `Sync` call can be made on the token, or on the `BufferCollection`
6364    /// obtained by passing this token to `BindSharedCollection`.  Either will
6365    /// ensure that the server knows about the tokens created via `Duplicate`
6366    /// before the other participant sends the token to the server via separate
6367    /// `Allocator` channel.
6368    ///
6369    /// All tokens must be turned in via
6370    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6371    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6372    /// successfully allocate buffers.
6373    ///
6374    /// All table fields are currently required.
6375    ///
6376    /// + request `rights_attenuation_mask` The rights bits that are zero in
6377    ///   this mask will be absent in the buffer VMO rights obtainable via the
6378    ///   client end of `token_request`. This allows an initiator or
6379    ///   intermediary participant to attenuate the rights available to a
6380    ///   delegate participant. This does not allow a participant to gain rights
6381    ///   that the participant doesn't already have. The value
6382    ///   `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
6383    ///   should be applied.
6384    ///   + These values for rights_attenuation_mask result in no attenuation:
6385    ///     + `ZX_RIGHT_SAME_RIGHTS` (preferred)
6386    ///     + 0xFFFFFFFF (this is reasonable when an attenuation mask is
6387    ///       computed)
6388    ///     + 0 (deprecated - do not use 0 - an ERROR will go to the log)
6389    /// + request `token_request` is the server end of a `BufferCollectionToken`
6390    ///   channel. The client end of this channel acts as another participant in
6391    ///   the shared buffer collection.
6392    pub fn r#duplicate(
6393        &self,
6394        mut payload: BufferCollectionTokenDuplicateRequest,
6395    ) -> Result<(), fidl::Error> {
6396        self.client.send::<BufferCollectionTokenDuplicateRequest>(
6397            &mut payload,
6398            0x73e78f92ee7fb887,
6399            fidl::encoding::DynamicFlags::FLEXIBLE,
6400        )
6401    }
6402
6403    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
6404    ///
6405    /// When the `BufferCollectionToken` is converted to a
6406    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
6407    /// the `BufferCollection` also.
6408    ///
6409    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
6410    /// client end without having sent
6411    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
6413    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
6414    /// to the root `Node`, which fails the whole buffer collection. In
6415    /// contrast, a dispensable `Node` can fail after buffers are allocated
6416    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
6418    ///
6419    /// The dispensable `Node` participates in constraints aggregation along
6420    /// with its parent before buffer allocation. If the dispensable `Node`
6421    /// fails before buffers are allocated, the failure propagates to the
6422    /// dispensable `Node`'s parent.
6423    ///
6424    /// After buffers are allocated, failure of the dispensable `Node` (or any
6425    /// child of the dispensable `Node`) does not propagate to the dispensable
6426    /// `Node`'s parent. Failure does propagate from a normal child of a
6427    /// dispensable `Node` to the dispensable `Node`.  Failure of a child is
6428    /// blocked from reaching its parent if the child is attached using
6429    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
6430    /// dispensable and the failure occurred after allocation.
6431    ///
6432    /// A dispensable `Node` can be used in cases where a participant needs to
6433    /// provide constraints, but after buffers are allocated, the participant
6434    /// can fail without causing buffer collection failure from the parent
6435    /// `Node`'s point of view.
6436    ///
6437    /// In contrast, `BufferCollection.AttachToken` can be used to create a
6438    /// `BufferCollectionToken` which does not participate in constraints
6439    /// aggregation with its parent `Node`, and whose failure at any time does
6440    /// not propagate to its parent `Node`, and whose potential delay providing
6441    /// constraints does not prevent the parent `Node` from completing its
6442    /// buffer allocation.
6443    ///
6444    /// An initiator (creator of the root `Node` using
6445    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
6446    /// scenarios choose to initially use a dispensable `Node` for a first
6447    /// instance of a participant, and then later if the first instance of that
    /// participant fails, a new second instance of that participant may be given
6449    /// a `BufferCollectionToken` created with `AttachToken`.
6450    ///
6451    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
6452    /// shortly before sending the dispensable `BufferCollectionToken` to a
6453    /// delegate participant. Because `SetDispensable` prevents propagation of
6454    /// child `Node` failure to parent `Node`(s), if the client was relying on
6455    /// noticing child failure via failure of the parent `Node` retained by the
6456    /// client, the client may instead need to notice failure via other means.
6457    /// If other means aren't available/convenient, the client can instead
6458    /// retain the dispensable `Node` and create a child `Node` under that to
6459    /// send to the delegate participant, retaining this `Node` in order to
6460    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
6461    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
6462    /// (e.g. starting a new instance of the delegate participant and handing it
6463    /// a `BufferCollectionToken` created using
6464    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
6465    /// and clean up in a client-specific way).
6466    ///
6467    /// While it is possible (and potentially useful) to `SetDispensable` on a
6468    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
6469    /// to later replace a failed dispensable `Node` that was a direct child of
6470    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
6471    /// (since there's no `AttachToken` on a group). Instead, to enable
6472    /// `AttachToken` replacement in this case, create an additional
6473    /// non-dispensable token that's a direct child of the group and make the
6474    /// existing dispensable token a child of the additional token.  This way,
6475    /// the additional token that is a direct child of the group has
6476    /// `BufferCollection.AttachToken` which can be used to replace the failed
6477    /// dispensable token.
6478    ///
6479    /// `SetDispensable` on an already-dispensable token is idempotent.
6480    pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
6481        self.client.send::<fidl::encoding::EmptyPayload>(
6482            (),
6483            0x228acf979254df8b,
6484            fidl::encoding::DynamicFlags::FLEXIBLE,
6485        )
6486    }
6487
6488    /// Create a logical OR among a set of tokens, called a
6489    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
6490    ///
6491    /// Most sysmem clients and many participants don't need to care about this
6492    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
6493    /// a participant wants to attempt to include one set of delegate
6494    /// participants, but if constraints don't combine successfully that way,
6495    /// fall back to a different (possibly overlapping) set of delegate
6496    /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
6498    /// across all involved delegate participants). In such cases, a
6499    /// `BufferCollectionTokenGroup` is useful.
6500    ///
6501    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
6502    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s).  The child tokens
6503    /// which are not selected during aggregation will fail (close), which a
6504    /// potential participant should notice when their `BufferCollection`
6505    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
6506    /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
6508    /// allocate a logical buffer collection or later async failure of a buffer
6509    /// collection).
6510    ///
6511    /// See comments on protocol `BufferCollectionTokenGroup`.
6512    ///
6513    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
6514    /// applied to the whole group can be achieved with a
6515    /// `BufferCollectionToken` for this purpose as a direct parent of the
6516    /// `BufferCollectionTokenGroup`.
6517    ///
6518    /// All table fields are currently required.
6519    ///
6520    /// + request `group_request` The server end of a
6521    ///   `BufferCollectionTokenGroup` channel to be served by sysmem.
6522    pub fn r#create_buffer_collection_token_group(
6523        &self,
6524        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
6525    ) -> Result<(), fidl::Error> {
6526        self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
6527            &mut payload,
6528            0x30f8d48e77bd36f2,
6529            fidl::encoding::DynamicFlags::FLEXIBLE,
6530        )
6531    }
6532}
6533
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionTokenSynchronousProxy> for zx::Handle {
    // Consumes the synchronous proxy and yields its channel as a generic handle.
    fn from(value: BufferCollectionTokenSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
6540
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionTokenSynchronousProxy {
    // Wraps a raw channel in a synchronous proxy.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionTokenSynchronousProxy::new(value)
    }
}
6547
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionTokenSynchronousProxy {
    type Protocol = BufferCollectionTokenMarker;

    // Builds a synchronous proxy from a typed client end by extracting its channel.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
6556
/// Asynchronous client proxy for the `fuchsia.sysmem2/BufferCollectionToken`
/// protocol.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenProxy {
    // Async FIDL client using the default Fuchsia resource dialect.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
6561
6562impl fidl::endpoints::Proxy for BufferCollectionTokenProxy {
6563    type Protocol = BufferCollectionTokenMarker;
6564
6565    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
6566        Self::new(inner)
6567    }
6568
6569    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
6570        self.client.into_channel().map_err(|client| Self { client })
6571    }
6572
6573    fn as_channel(&self) -> &::fidl::AsyncChannel {
6574        self.client.as_channel()
6575    }
6576}
6577
6578impl BufferCollectionTokenProxy {
6579    /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionToken.
6580    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
6581        let protocol_name =
6582            <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
6583        Self { client: fidl::client::Client::new(channel, protocol_name) }
6584    }
6585
6586    /// Get a Stream of events from the remote end of the protocol.
6587    ///
6588    /// # Panics
6589    ///
6590    /// Panics if the event stream was already taken.
6591    pub fn take_event_stream(&self) -> BufferCollectionTokenEventStream {
6592        BufferCollectionTokenEventStream { event_receiver: self.client.take_event_receiver() }
6593    }
6594
6595    /// Ensure that previous messages have been received server side. This is
6596    /// particularly useful after previous messages that created new tokens,
6597    /// because a token must be known to the sysmem server before sending the
6598    /// token to another participant.
6599    ///
6600    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
6601    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
6602    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
6603    /// to mitigate the possibility of a hostile/fake
6604    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
6605    /// Another way is to pass the token to
6606    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
6607    /// the token as part of exchanging it for a
6608    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
6609    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
6610    /// of stalling.
6611    ///
6612    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
6613    /// and then starting and completing a `Sync`, it's then safe to send the
6614    /// `BufferCollectionToken` client ends to other participants knowing the
6615    /// server will recognize the tokens when they're sent by the other
6616    /// participants to sysmem in a
6617    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
6618    /// efficient way to create tokens while avoiding unnecessary round trips.
6619    ///
6620    /// Other options include waiting for each
6621    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
6622    /// individually (using separate call to `Sync` after each), or calling
6623    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
6624    /// converted to a `BufferCollection` via
6625    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
6626    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
6627    /// the sync step and can create multiple tokens at once.
6628    pub fn r#sync(
6629        &self,
6630    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
6631        BufferCollectionTokenProxyInterface::r#sync(self)
6632    }
6633
    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
    ///
    /// Normally a participant will convert a `BufferCollectionToken` into a
    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
    /// `Release` via the token (and then close the channel immediately or
    /// shortly later in response to server closing the server end), which
    /// avoids causing buffer collection failure. Without a prior `Release`,
    /// closing the `BufferCollectionToken` client end will cause buffer
    /// collection failure.
    ///
    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
    ///
    /// By default the server handles unexpected closure of a
    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
    /// first) by failing the buffer collection. Partly this is to expedite
    /// closing VMO handles to reclaim memory when any participant fails. If a
    /// participant would like to cleanly close a `BufferCollection` without
    /// causing buffer collection failure, the participant can send `Release`
    /// before closing the `BufferCollection` client end. The `Release` can
    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
    /// buffer collection won't require constraints from this node in order to
    /// allocate. If after `SetConstraints`, the constraints are retained and
    /// aggregated, despite the lack of `BufferCollection` connection at the
    /// time of constraints aggregation.
    ///
    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
    ///
    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
    /// end (without `Release` first) will trigger failure of the buffer
    /// collection. To close a `BufferCollectionTokenGroup` channel without
    /// failing the buffer collection, ensure that AllChildrenPresent() has been
    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
    /// client end.
    ///
    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
    /// buffer collection will fail (triggered by reception of `Release` without
    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
    /// close requires `AllChildrenPresent` (if not already sent), then
    /// `Release`, then close client end.
    ///
    /// If `Release` occurs after `AllChildrenPresent`, the children and all
    /// their constraints remain intact (just as they would if the
    /// `BufferCollectionTokenGroup` channel had remained open), and the client
    /// end close doesn't trigger buffer collection failure.
    ///
    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
    ///
    /// For brevity, the per-channel-protocol paragraphs above ignore the
    /// separate failure domain created by
    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
    /// unexpectedly closes (without `Release` first) and that client end is
    /// under a failure domain, instead of failing the whole buffer collection,
    /// the failure domain is failed, but the buffer collection itself is
    /// isolated from failure of the failure domain. Such failure domains can be
    /// nested, in which case only the inner-most failure domain in which the
    /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way message: delegate to the shared `ProxyInterface` implementation.
        BufferCollectionTokenProxyInterface::r#release(self)
    }
6698
6699    /// Set a name for VMOs in this buffer collection.
6700    ///
6701    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
6702    /// will be truncated to fit. The name of the vmo will be suffixed with the
6703    /// buffer index within the collection (if the suffix fits within
6704    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
6705    /// listed in the inspect data.
6706    ///
6707    /// The name only affects VMOs allocated after the name is set; this call
6708    /// does not rename existing VMOs. If multiple clients set different names
6709    /// then the larger priority value will win. Setting a new name with the
6710    /// same priority as a prior name doesn't change the name.
6711    ///
6712    /// All table fields are currently required.
6713    ///
6714    /// + request `priority` The name is only set if this is the first `SetName`
6715    ///   or if `priority` is greater than any previous `priority` value in
6716    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
6717    /// + request `name` The name for VMOs created under this buffer collection.
6718    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
6719        BufferCollectionTokenProxyInterface::r#set_name(self, payload)
6720    }
6721
    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
    /// indicate which client is closing their channel first, leading to subtree
    /// failure (which can be normal if the purpose of the subtree is over, but
    /// if happening earlier than expected, the client-channel-specific name can
    /// help diagnose where the failure is first coming from, from sysmem's
    /// point of view).
    ///
    /// All table fields are currently required.
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message: delegate to the shared `ProxyInterface` implementation.
        BufferCollectionTokenProxyInterface::r#set_debug_client_info(self, payload)
    }
6757
6758    /// Sysmem logs a warning if sysmem hasn't seen
6759    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
6760    /// within 5 seconds after creation of a new collection.
6761    ///
6762    /// Clients can call this method to change when the log is printed. If
6763    /// multiple client set the deadline, it's unspecified which deadline will
6764    /// take effect.
6765    ///
6766    /// In most cases the default works well.
6767    ///
6768    /// All table fields are currently required.
6769    ///
6770    /// + request `deadline` The time at which sysmem will start trying to log
6771    ///   the warning, unless all constraints are with sysmem by then.
6772    pub fn r#set_debug_timeout_log_deadline(
6773        &self,
6774        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
6775    ) -> Result<(), fidl::Error> {
6776        BufferCollectionTokenProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
6777    }
6778
6779    /// This enables verbose logging for the buffer collection.
6780    ///
6781    /// Verbose logging includes constraints set via
6782    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
6783    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
6784    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
6785    /// the tree of `Node`(s).
6786    ///
6787    /// Normally sysmem prints only a single line complaint when aggregation
6788    /// fails, with just the specific detailed reason that aggregation failed,
6789    /// with little surrounding context.  While this is often enough to diagnose
6790    /// a problem if only a small change was made and everything was working
6791    /// before the small change, it's often not particularly helpful for getting
6792    /// a new buffer collection to work for the first time.  Especially with
6793    /// more complex trees of nodes, involving things like
6794    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
6795    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
6796    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
6797    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
6798    /// looks like and why it's failing a logical allocation, or why a tree or
6799    /// subtree is failing sooner than expected.
6800    ///
6801    /// The intent of the extra logging is to be acceptable from a performance
6802    /// point of view, under the assumption that verbose logging is only enabled
6803    /// on a low number of buffer collections. If we're not tracking down a bug,
6804    /// we shouldn't send this message.
6805    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
6806        BufferCollectionTokenProxyInterface::r#set_verbose_logging(self)
6807    }
6808
6809    /// This gets a handle that can be used as a parameter to
6810    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
6811    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
6812    /// client obtained this handle from this `Node`.
6813    ///
6814    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
6815    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
6816    /// despite the two calls typically being on different channels.
6817    ///
6818    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
6819    ///
6820    /// All table fields are currently required.
6821    ///
6822    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
6823    ///   different `Node` channel, to prove that the client obtained the handle
6824    ///   from this `Node`.
6825    pub fn r#get_node_ref(
6826        &self,
6827    ) -> fidl::client::QueryResponseFut<
6828        NodeGetNodeRefResponse,
6829        fidl::encoding::DefaultFuchsiaResourceDialect,
6830    > {
6831        BufferCollectionTokenProxyInterface::r#get_node_ref(self)
6832    }
6833
    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
    /// rooted at a different child token of a common parent
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
    /// passed-in `node_ref`.
    ///
    /// This call is for assisting with admission control de-duplication, and
    /// with debugging.
    ///
    /// The `node_ref` must be obtained using
    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
    ///
    /// The `node_ref` can be a duplicated handle; it's not necessary to call
    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// If a calling token may not actually be a valid token at all due to a
    /// potentially hostile/untrusted provider of the token, call
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
    /// never responds due to a calling token not being a real token (not really
    /// talking to sysmem).  Another option is to call
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
    /// which also validates the token along with converting it to a
    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
    ///
    /// All table fields are currently required.
    ///
    /// - response `is_alternate`
    ///   - true: The first parent node in common between the calling node and
    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
    ///     that the calling `Node` and the `node_ref` `Node` will not have both
    ///     their constraints apply - rather sysmem will choose one or the other
    ///     of the constraints - never both.  This is because only one child of
    ///     a `BufferCollectionTokenGroup` is selected during logical
    ///     allocation, with only that one child's subtree contributing to
    ///     constraints aggregation.
    ///   - false: The first parent node in common between the calling `Node`
    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
    ///     Currently, this means the first parent node in common is a
    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
    ///     `Node` may have both their constraints apply during constraints
    ///     aggregation of the logical allocation, if both `Node`(s) are
    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
    ///     this case, there is no `BufferCollectionTokenGroup` that will
    ///     directly prevent the two `Node`(s) from both being selected and
    ///     their constraints both aggregated, but even when false, one or both
    ///     `Node`(s) may still be eliminated from consideration if one or both
    ///     `Node`(s) has a direct or indirect parent
    ///     `BufferCollectionTokenGroup` which selects a child subtree other
    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
    ///   associated with the same buffer collection as the calling `Node`.
    ///   Another reason for this error is if the `node_ref` is an
    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
    ///   a real `node_ref` obtained from `GetNodeRef`.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
    ///   the needed rights expected on a real `node_ref`.
    /// * No other failing status codes are returned by this call.  However,
    ///   sysmem may add additional codes in future, so the client should have
    ///   sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call: delegate to the shared `ProxyInterface` implementation.
        BufferCollectionTokenProxyInterface::r#is_alternate_for(self, payload)
    }
6904
    /// Get the buffer collection ID. This ID is also available from
    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
    /// within the collection).
    ///
    /// This call is mainly useful in situations where we can't convey a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
    /// handle, which can be joined back up with a `BufferCollection` client end
    /// that was created via a different path. Prefer to convey a
    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
    ///
    /// Trusting a `buffer_collection_id` value from a source other than sysmem
    /// is analogous to trusting a koid value from a source other than zircon.
    /// Both should be avoided unless really necessary, and both require
    /// caution. In some situations it may be reasonable to refer to a
    /// pre-established `BufferCollection` by `buffer_collection_id` via a
    /// protocol for efficiency reasons, but an incoming value purporting to be
    /// a `buffer_collection_id` is not sufficient alone to justify granting the
    /// sender of the `buffer_collection_id` any capability. The sender must
    /// first prove to a receiver that the sender has/had a VMO or has/had a
    /// `BufferCollectionToken` to the same collection by sending a handle that
    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
    /// `buffer_collection_id` value. The receiver should take care to avoid
    /// assuming that a sender had a `BufferCollectionToken` in cases where the
    /// sender has only proven that the sender had a VMO.
    ///
    /// - response `buffer_collection_id` This ID is unique per buffer
    ///   collection per boot. Each buffer is uniquely identified by the
    ///   `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
    ) -> fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call: delegate to the shared `ProxyInterface` implementation.
        BufferCollectionTokenProxyInterface::r#get_buffer_collection_id(self)
    }
6942
    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
    /// created after this message to weak, which means that a client's `Node`
    /// client end (or a child created after this message) is not alone
    /// sufficient to keep allocated VMOs alive.
    ///
    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
    /// `close_weak_asap`.
    ///
    /// This message is only permitted before the `Node` becomes ready for
    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
    ///   * `BufferCollectionToken`: any time
    ///   * `BufferCollection`: before `SetConstraints`
    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
    ///
    /// Currently, no conversion from strong `Node` to weak `Node` after ready
    /// for allocation is provided, but a client can simulate that by creating
    /// an additional `Node` before allocation and setting that additional
    /// `Node` to weak, and then potentially at some point later sending
    /// `Release` and closing the client end of the client's strong `Node`, but
    /// keeping the client's weak `Node`.
    ///
    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
    /// collection failure (all `Node` client end(s) will see
    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
    /// this situation until all `Node`(s) are ready for allocation. For initial
    /// allocation to succeed, at least one strong `Node` is required to exist
    /// at allocation time, but after that client receives VMO handles, that
    /// client can `BufferCollection.Release` and close the client end without
    /// causing this type of failure.
    ///
    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
    /// separately as appropriate.
    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
        // One-way message: delegate to the shared `ProxyInterface` implementation.
        BufferCollectionTokenProxyInterface::r#set_weak(self)
    }
6980
    /// This indicates to sysmem that the client is prepared to pay attention to
    /// `close_weak_asap`.
    ///
    /// If sent, this message must be before
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
    /// send this message before `WaitForAllBuffersAllocated`, or a parent
    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
    /// trigger buffer collection failure.
    ///
    /// This message is necessary because weak sysmem VMOs have not always been
    /// a thing, so older clients are not aware of the need to pay attention to
    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
    /// sysmem weak VMO handles asap. By having this message and requiring
    /// participants to indicate their acceptance of this aspect of the overall
    /// protocol, we avoid situations where an older client is delivered a weak
    /// VMO without any way for sysmem to get that VMO to close quickly later
    /// (and on a per-buffer basis).
    ///
    /// A participant that doesn't handle `close_weak_asap` and also doesn't
    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
    /// same participant has a child/delegate which does retrieve VMOs, that
    /// child/delegate will need to send `SetWeakOk` before
    /// `WaitForAllBuffersAllocated`.
    ///
    /// + request `for_child_nodes_also` If present and true, this means direct
    ///   child nodes of this node created after this message plus all
    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
    ///   those nodes. Any child node of this node that was created before this
    ///   message is not included. This setting is "sticky" in the sense that a
    ///   subsequent `SetWeakOk` without this bool set to true does not reset
    ///   the server-side bool. If this creates a problem for a participant, a
    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
    ///   tokens instead, as appropriate. A participant should only set
    ///   `for_child_nodes_also` true if the participant can really promise to
    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
    ///   weak VMO handles held by participants holding the corresponding child
    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
    ///   which are using sysmem(1) can be weak, despite the clients of those
    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
    ///   direct way to find out about `close_weak_asap`. This only applies to
    ///   descendants of this `Node` which are using sysmem(1), not to this
    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
    ///   token, which will fail allocation unless an ancestor of this `Node`
    ///   specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // One-way message: delegate to the shared `ProxyInterface` implementation.
        BufferCollectionTokenProxyInterface::r#set_weak_ok(self, payload)
    }
7033
    /// The server_end will be closed after this `Node` and any child nodes have
    /// released their buffer counts, making those counts available for
    /// reservation by a different `Node` via
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
    ///
    /// The `Node` buffer counts may not be released until the entire tree of
    /// `Node`(s) is closed or failed, because
    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
    /// `Node` buffer counts remain reserved until the orphaned node is later
    /// cleaned up.
    ///
    /// If the `Node` exceeds a fairly large number of attached eventpair server
    /// ends, a log message will indicate this and the `Node` (and the
    /// appropriate) sub-tree will fail.
    ///
    /// The `server_end` will remain open when
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// This message can also be used with a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message: delegate to the shared `ProxyInterface` implementation.
        BufferCollectionTokenProxyInterface::r#attach_node_tracking(self, payload)
    }
7063
    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
    /// one, referring to the same buffer collection.
    ///
    /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
    ///
    /// This method can be used to add more participants, by transferring the
    /// newly created tokens to additional participants.
    ///
    /// A new token will be returned for each entry in the
    /// `rights_attenuation_masks` array.
    ///
    /// If the called token may not actually be a valid token due to a
    /// potentially hostile/untrusted provider of the token, consider using
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
    /// due to the calling token not being a real token.
    ///
    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
    /// method, because the sync step is included in this call, at the cost of a
    /// round trip during this call.
    ///
    /// All tokens must be turned in to sysmem via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
    /// successfully allocate buffers (or to logically allocate buffers in the
    /// case of subtrees involving
    /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
    ///
    /// All table fields are currently required.
    ///
    /// + request `rights_attenuation_mask` In each entry of
    ///   `rights_attenuation_masks`, rights bits that are zero will be absent
    ///   in the buffer VMO rights obtainable via the corresponding returned
    ///   token. This allows an initiator or intermediary participant to
    ///   attenuate the rights available to a participant. This does not allow a
    ///   participant to gain rights that the participant doesn't already have.
    ///   The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
    ///   attenuation should be applied.
    /// - response `tokens` The client ends of each newly created token.
    pub fn r#duplicate_sync(
        &self,
        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> fidl::client::QueryResponseFut<
        BufferCollectionTokenDuplicateSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call: delegate to the shared `ProxyInterface` implementation.
        BufferCollectionTokenProxyInterface::r#duplicate_sync(self, payload)
    }
7115
    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
    /// one, referring to the same buffer collection.
    ///
    /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
    ///
    /// This method can be used to add a participant, by transferring the newly
    /// created token to another participant.
    ///
    /// This one-way message can be used instead of the two-way
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
    /// sysmem to respond to
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
    /// client code isn't structured to make it easy to duplicate all the needed
    /// tokens at once.
    ///
    /// After sending one or more `Duplicate` messages, and before sending the
    /// newly created child tokens to other participants (or to other
    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
    /// `Sync` call can be made on the token, or on the `BufferCollection`
    /// obtained by passing this token to `BindSharedCollection`.  Either will
    /// ensure that the server knows about the tokens created via `Duplicate`
    /// before the other participant sends the token to the server via separate
    /// `Allocator` channel.
    ///
    /// All tokens must be turned in via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
    /// successfully allocate buffers.
    ///
    /// All table fields are currently required.
    ///
    /// + request `rights_attenuation_mask` The rights bits that are zero in
    ///   this mask will be absent in the buffer VMO rights obtainable via the
    ///   client end of `token_request`. This allows an initiator or
    ///   intermediary participant to attenuate the rights available to a
    ///   delegate participant. This does not allow a participant to gain rights
    ///   that the participant doesn't already have. The value
    ///   `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
    ///   should be applied.
    ///   + These values for rights_attenuation_mask result in no attenuation:
    ///     + `ZX_RIGHT_SAME_RIGHTS` (preferred)
    ///     + 0xFFFFFFFF (this is reasonable when an attenuation mask is
    ///       computed)
    ///     + 0 (deprecated - do not use 0 - an ERROR will go to the log)
    /// + request `token_request` is the server end of a `BufferCollectionToken`
    ///   channel. The client end of this channel acts as another participant in
    ///   the shared buffer collection.
    pub fn r#duplicate(
        &self,
        mut payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message: delegate to the shared `ProxyInterface` implementation.
        BufferCollectionTokenProxyInterface::r#duplicate(self, payload)
    }
7172
7173    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
7174    ///
7175    /// When the `BufferCollectionToken` is converted to a
7176    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
7177    /// the `BufferCollection` also.
7178    ///
7179    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
7180    /// client end without having sent
7181    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
7183    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
7184    /// to the root `Node`, which fails the whole buffer collection. In
7185    /// contrast, a dispensable `Node` can fail after buffers are allocated
7186    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
7188    ///
7189    /// The dispensable `Node` participates in constraints aggregation along
7190    /// with its parent before buffer allocation. If the dispensable `Node`
7191    /// fails before buffers are allocated, the failure propagates to the
7192    /// dispensable `Node`'s parent.
7193    ///
7194    /// After buffers are allocated, failure of the dispensable `Node` (or any
7195    /// child of the dispensable `Node`) does not propagate to the dispensable
7196    /// `Node`'s parent. Failure does propagate from a normal child of a
7197    /// dispensable `Node` to the dispensable `Node`.  Failure of a child is
7198    /// blocked from reaching its parent if the child is attached using
7199    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
7200    /// dispensable and the failure occurred after allocation.
7201    ///
7202    /// A dispensable `Node` can be used in cases where a participant needs to
7203    /// provide constraints, but after buffers are allocated, the participant
7204    /// can fail without causing buffer collection failure from the parent
7205    /// `Node`'s point of view.
7206    ///
7207    /// In contrast, `BufferCollection.AttachToken` can be used to create a
7208    /// `BufferCollectionToken` which does not participate in constraints
7209    /// aggregation with its parent `Node`, and whose failure at any time does
7210    /// not propagate to its parent `Node`, and whose potential delay providing
7211    /// constraints does not prevent the parent `Node` from completing its
7212    /// buffer allocation.
7213    ///
7214    /// An initiator (creator of the root `Node` using
7215    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
7216    /// scenarios choose to initially use a dispensable `Node` for a first
7217    /// instance of a participant, and then later if the first instance of that
7218    /// participant fails, a new second instance of that participant my be given
7219    /// a `BufferCollectionToken` created with `AttachToken`.
7220    ///
7221    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
7222    /// shortly before sending the dispensable `BufferCollectionToken` to a
7223    /// delegate participant. Because `SetDispensable` prevents propagation of
7224    /// child `Node` failure to parent `Node`(s), if the client was relying on
7225    /// noticing child failure via failure of the parent `Node` retained by the
7226    /// client, the client may instead need to notice failure via other means.
7227    /// If other means aren't available/convenient, the client can instead
7228    /// retain the dispensable `Node` and create a child `Node` under that to
7229    /// send to the delegate participant, retaining this `Node` in order to
7230    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
7231    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
7232    /// (e.g. starting a new instance of the delegate participant and handing it
7233    /// a `BufferCollectionToken` created using
7234    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
7235    /// and clean up in a client-specific way).
7236    ///
7237    /// While it is possible (and potentially useful) to `SetDispensable` on a
7238    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
7239    /// to later replace a failed dispensable `Node` that was a direct child of
7240    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
7241    /// (since there's no `AttachToken` on a group). Instead, to enable
7242    /// `AttachToken` replacement in this case, create an additional
7243    /// non-dispensable token that's a direct child of the group and make the
7244    /// existing dispensable token a child of the additional token.  This way,
7245    /// the additional token that is a direct child of the group has
7246    /// `BufferCollection.AttachToken` which can be used to replace the failed
7247    /// dispensable token.
7248    ///
7249    /// `SetDispensable` on an already-dispensable token is idempotent.
7250    pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
7251        BufferCollectionTokenProxyInterface::r#set_dispensable(self)
7252    }
7253
    /// Create a logical OR among a set of tokens, called a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    ///
    /// Most sysmem clients and many participants don't need to care about this
    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
    /// a participant wants to attempt to include one set of delegate
    /// participants, but if constraints don't combine successfully that way,
    /// fall back to a different (possibly overlapping) set of delegate
    /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
    /// across all involved delegate participants). In such cases, a
    /// `BufferCollectionTokenGroup` is useful.
    ///
    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s).  The child tokens
    /// which are not selected during aggregation will fail (close), which a
    /// potential participant should notice when their `BufferCollection`
    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
    /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
    /// allocate a logical buffer collection or later async failure of a buffer
    /// collection).
    ///
    /// See comments on protocol `BufferCollectionTokenGroup`.
    ///
    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
    /// applied to the whole group can be achieved with a
    /// `BufferCollectionToken` for this purpose as a direct parent of the
    /// `BufferCollectionTokenGroup`.
    ///
    /// All table fields are currently required.
    ///
    /// + request `group_request` The server end of a
    ///   `BufferCollectionTokenGroup` channel to be served by sysmem.
    pub fn r#create_buffer_collection_token_group(
        &self,
        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#create_buffer_collection_token_group(self, payload)
    }
7294}
7295
// Client-side wire implementation of the `BufferCollectionToken` protocol.
// Every method sends a message tagged with the method's 64-bit ordinal and
// FLEXIBLE dynamic flags; two-way methods additionally return a future that
// decodes the matching response off the channel. The ordinal literals are
// generated by fidlgen and must match the server's generated values exactly.
impl BufferCollectionTokenProxyInterface for BufferCollectionTokenProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    // Two-way `Sync`: empty request, flexible empty-struct response.
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the `Sync` response body; the ordinal here must match the
        // one used for the request below.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `Release`; no response is expected.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetName`.
    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugClientInfo`.
    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugTimeoutLogDeadline`.
    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetVerboseLogging`.
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetNodeRef`: empty request, `NodeGetNodeRefResponse` reply.
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `IsAlternateFor`: the reply is a flexible result union decoded
    // into `Result<_, Error>`.
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetBufferCollectionId`.
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `SetWeak`.
    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetWeakOk`; payload is taken by value.
    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `AttachNodeTracking`; payload is taken by value.
    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type DuplicateSyncResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionTokenDuplicateSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `DuplicateSync`.
    fn r#duplicate_sync(
        &self,
        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> Self::DuplicateSyncResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x1c1af9919d1ca45c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            BufferCollectionTokenDuplicateSyncRequest,
            BufferCollectionTokenDuplicateSyncResponse,
        >(
            payload,
            0x1c1af9919d1ca45c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `Duplicate`; payload is taken by value.
    fn r#duplicate(
        &self,
        mut payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenDuplicateRequest>(
            &mut payload,
            0x73e78f92ee7fb887,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDispensable`.
    fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x228acf979254df8b,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `CreateBufferCollectionTokenGroup`; payload is taken by value.
    fn r#create_buffer_collection_token_group(
        &self,
        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
            &mut payload,
            0x30f8d48e77bd36f2,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
7530
/// Stream of events arriving on a `BufferCollectionToken` channel.
pub struct BufferCollectionTokenEventStream {
    // Yields raw message buffers from the underlying channel; each buffer is
    // decoded into a `BufferCollectionTokenEvent` by `poll_next`.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
7534
// The event stream can be moved freely between polls; it holds no
// self-referential state.
impl std::marker::Unpin for BufferCollectionTokenEventStream {}
7536
impl futures::stream::FusedStream for BufferCollectionTokenEventStream {
    /// Returns true once the underlying event receiver has terminated,
    /// i.e. the stream will never yield another event.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
7542
7543impl futures::Stream for BufferCollectionTokenEventStream {
7544    type Item = Result<BufferCollectionTokenEvent, fidl::Error>;
7545
7546    fn poll_next(
7547        mut self: std::pin::Pin<&mut Self>,
7548        cx: &mut std::task::Context<'_>,
7549    ) -> std::task::Poll<Option<Self::Item>> {
7550        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
7551            &mut self.event_receiver,
7552            cx
7553        )?) {
7554            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenEvent::decode(buf))),
7555            None => std::task::Poll::Ready(None),
7556        }
7557    }
7558}
7559
/// An event received from a `BufferCollectionToken` channel.
///
/// No concrete events are defined here; flexible events sent by a newer
/// server are surfaced as `_UnknownEvent` (see `decode` below).
#[derive(Debug)]
pub enum BufferCollectionTokenEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
7568
impl BufferCollectionTokenEvent {
    /// Decodes a message buffer as a [`BufferCollectionTokenEvent`].
    ///
    /// Flexible messages with an unrecognized ordinal are tolerated and
    /// reported as `_UnknownEvent`; strict messages with an unknown ordinal
    /// produce an `UnknownOrdinal` error.
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events are unsolicited messages, so they carry a zero transaction id.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // Any flexible message is accepted, regardless of ordinal.
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(BufferCollectionTokenEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            // A strict message with an unknown ordinal is a protocol error.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name:
                    <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
7589
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionToken.
pub struct BufferCollectionTokenRequestStream {
    // Shared server state (channel plus shutdown bookkeeping); also cloned
    // into each control handle and responder handed out by `poll_next`.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has yielded `None`; polling again after that panics.
    is_terminated: bool,
}
7595
// The request stream can be moved freely between polls; its state lives
// behind an `Arc`, not in self-referential fields.
impl std::marker::Unpin for BufferCollectionTokenRequestStream {}
7597
impl futures::stream::FusedStream for BufferCollectionTokenRequestStream {
    /// Returns true once this stream has yielded `None` (shutdown or peer
    /// closed); it must not be polled again after that.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
7603
7604impl fidl::endpoints::RequestStream for BufferCollectionTokenRequestStream {
7605    type Protocol = BufferCollectionTokenMarker;
7606    type ControlHandle = BufferCollectionTokenControlHandle;
7607
7608    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
7609        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
7610    }
7611
7612    fn control_handle(&self) -> Self::ControlHandle {
7613        BufferCollectionTokenControlHandle { inner: self.inner.clone() }
7614    }
7615
7616    fn into_inner(
7617        self,
7618    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
7619    {
7620        (self.inner, self.is_terminated)
7621    }
7622
7623    fn from_inner(
7624        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
7625        is_terminated: bool,
7626    ) -> Self {
7627        Self { inner, is_terminated }
7628    }
7629}
7630
7631impl futures::Stream for BufferCollectionTokenRequestStream {
7632    type Item = Result<BufferCollectionTokenRequest, fidl::Error>;
7633
7634    fn poll_next(
7635        mut self: std::pin::Pin<&mut Self>,
7636        cx: &mut std::task::Context<'_>,
7637    ) -> std::task::Poll<Option<Self::Item>> {
7638        let this = &mut *self;
7639        if this.inner.check_shutdown(cx) {
7640            this.is_terminated = true;
7641            return std::task::Poll::Ready(None);
7642        }
7643        if this.is_terminated {
7644            panic!("polled BufferCollectionTokenRequestStream after completion");
7645        }
7646        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
7647            |bytes, handles| {
7648                match this.inner.channel().read_etc(cx, bytes, handles) {
7649                    std::task::Poll::Ready(Ok(())) => {}
7650                    std::task::Poll::Pending => return std::task::Poll::Pending,
7651                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
7652                        this.is_terminated = true;
7653                        return std::task::Poll::Ready(None);
7654                    }
7655                    std::task::Poll::Ready(Err(e)) => {
7656                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
7657                            e.into(),
7658                        ))));
7659                    }
7660                }
7661
7662                // A message has been received from the channel
7663                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
7664
7665                std::task::Poll::Ready(Some(match header.ordinal {
7666                0x11ac2555cf575b54 => {
7667                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7668                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7669                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7670                    let control_handle = BufferCollectionTokenControlHandle {
7671                        inner: this.inner.clone(),
7672                    };
7673                    Ok(BufferCollectionTokenRequest::Sync {
7674                        responder: BufferCollectionTokenSyncResponder {
7675                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7676                            tx_id: header.tx_id,
7677                        },
7678                    })
7679                }
7680                0x6a5cae7d6d6e04c6 => {
7681                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7682                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7683                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7684                    let control_handle = BufferCollectionTokenControlHandle {
7685                        inner: this.inner.clone(),
7686                    };
7687                    Ok(BufferCollectionTokenRequest::Release {
7688                        control_handle,
7689                    })
7690                }
7691                0xb41f1624f48c1e9 => {
7692                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7693                    let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7694                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
7695                    let control_handle = BufferCollectionTokenControlHandle {
7696                        inner: this.inner.clone(),
7697                    };
7698                    Ok(BufferCollectionTokenRequest::SetName {payload: req,
7699                        control_handle,
7700                    })
7701                }
7702                0x5cde8914608d99b1 => {
7703                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7704                    let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7705                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
7706                    let control_handle = BufferCollectionTokenControlHandle {
7707                        inner: this.inner.clone(),
7708                    };
7709                    Ok(BufferCollectionTokenRequest::SetDebugClientInfo {payload: req,
7710                        control_handle,
7711                    })
7712                }
7713                0x716b0af13d5c0806 => {
7714                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7715                    let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7716                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
7717                    let control_handle = BufferCollectionTokenControlHandle {
7718                        inner: this.inner.clone(),
7719                    };
7720                    Ok(BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {payload: req,
7721                        control_handle,
7722                    })
7723                }
7724                0x5209c77415b4dfad => {
7725                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7726                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7727                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7728                    let control_handle = BufferCollectionTokenControlHandle {
7729                        inner: this.inner.clone(),
7730                    };
7731                    Ok(BufferCollectionTokenRequest::SetVerboseLogging {
7732                        control_handle,
7733                    })
7734                }
7735                0x5b3d0e51614df053 => {
7736                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7737                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7738                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7739                    let control_handle = BufferCollectionTokenControlHandle {
7740                        inner: this.inner.clone(),
7741                    };
7742                    Ok(BufferCollectionTokenRequest::GetNodeRef {
7743                        responder: BufferCollectionTokenGetNodeRefResponder {
7744                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7745                            tx_id: header.tx_id,
7746                        },
7747                    })
7748                }
7749                0x3a58e00157e0825 => {
7750                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7751                    let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7752                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
7753                    let control_handle = BufferCollectionTokenControlHandle {
7754                        inner: this.inner.clone(),
7755                    };
7756                    Ok(BufferCollectionTokenRequest::IsAlternateFor {payload: req,
7757                        responder: BufferCollectionTokenIsAlternateForResponder {
7758                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7759                            tx_id: header.tx_id,
7760                        },
7761                    })
7762                }
7763                0x77d19a494b78ba8c => {
7764                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7765                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7766                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7767                    let control_handle = BufferCollectionTokenControlHandle {
7768                        inner: this.inner.clone(),
7769                    };
7770                    Ok(BufferCollectionTokenRequest::GetBufferCollectionId {
7771                        responder: BufferCollectionTokenGetBufferCollectionIdResponder {
7772                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7773                            tx_id: header.tx_id,
7774                        },
7775                    })
7776                }
7777                0x22dd3ea514eeffe1 => {
7778                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7779                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7780                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7781                    let control_handle = BufferCollectionTokenControlHandle {
7782                        inner: this.inner.clone(),
7783                    };
7784                    Ok(BufferCollectionTokenRequest::SetWeak {
7785                        control_handle,
7786                    })
7787                }
7788                0x38a44fc4d7724be9 => {
7789                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7790                    let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7791                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
7792                    let control_handle = BufferCollectionTokenControlHandle {
7793                        inner: this.inner.clone(),
7794                    };
7795                    Ok(BufferCollectionTokenRequest::SetWeakOk {payload: req,
7796                        control_handle,
7797                    })
7798                }
7799                0x3f22f2a293d3cdac => {
7800                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7801                    let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7802                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
7803                    let control_handle = BufferCollectionTokenControlHandle {
7804                        inner: this.inner.clone(),
7805                    };
7806                    Ok(BufferCollectionTokenRequest::AttachNodeTracking {payload: req,
7807                        control_handle,
7808                    })
7809                }
7810                0x1c1af9919d1ca45c => {
7811                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7812                    let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7813                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateSyncRequest>(&header, _body_bytes, handles, &mut req)?;
7814                    let control_handle = BufferCollectionTokenControlHandle {
7815                        inner: this.inner.clone(),
7816                    };
7817                    Ok(BufferCollectionTokenRequest::DuplicateSync {payload: req,
7818                        responder: BufferCollectionTokenDuplicateSyncResponder {
7819                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7820                            tx_id: header.tx_id,
7821                        },
7822                    })
7823                }
7824                0x73e78f92ee7fb887 => {
7825                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7826                    let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7827                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateRequest>(&header, _body_bytes, handles, &mut req)?;
7828                    let control_handle = BufferCollectionTokenControlHandle {
7829                        inner: this.inner.clone(),
7830                    };
7831                    Ok(BufferCollectionTokenRequest::Duplicate {payload: req,
7832                        control_handle,
7833                    })
7834                }
7835                0x228acf979254df8b => {
7836                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7837                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7838                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7839                    let control_handle = BufferCollectionTokenControlHandle {
7840                        inner: this.inner.clone(),
7841                    };
7842                    Ok(BufferCollectionTokenRequest::SetDispensable {
7843                        control_handle,
7844                    })
7845                }
7846                0x30f8d48e77bd36f2 => {
7847                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7848                    let mut req = fidl::new_empty!(BufferCollectionTokenCreateBufferCollectionTokenGroupRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7849                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(&header, _body_bytes, handles, &mut req)?;
7850                    let control_handle = BufferCollectionTokenControlHandle {
7851                        inner: this.inner.clone(),
7852                    };
7853                    Ok(BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {payload: req,
7854                        control_handle,
7855                    })
7856                }
7857                _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
7858                    Ok(BufferCollectionTokenRequest::_UnknownMethod {
7859                        ordinal: header.ordinal,
7860                        control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
7861                        method_type: fidl::MethodType::OneWay,
7862                    })
7863                }
7864                _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
7865                    this.inner.send_framework_err(
7866                        fidl::encoding::FrameworkErr::UnknownMethod,
7867                        header.tx_id,
7868                        header.ordinal,
7869                        header.dynamic_flags(),
7870                        (bytes, handles),
7871                    )?;
7872                    Ok(BufferCollectionTokenRequest::_UnknownMethod {
7873                        ordinal: header.ordinal,
7874                        control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
7875                        method_type: fidl::MethodType::TwoWay,
7876                    })
7877                }
7878                _ => Err(fidl::Error::UnknownOrdinal {
7879                    ordinal: header.ordinal,
7880                    protocol_name: <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
7881                }),
7882            }))
7883            },
7884        )
7885    }
7886}
7887
7888/// A [`fuchsia.sysmem2/BufferCollectionToken`] is not a buffer collection, but
7889/// rather is a way to identify a specific potential shared buffer collection,
7890/// and a way to distribute that potential shared buffer collection to
7891/// additional participants prior to the buffer collection allocating any
7892/// buffers.
7893///
7894/// Epitaphs are not used in this protocol.
7895///
7896/// We use a channel for the `BufferCollectionToken` instead of a single
7897/// `eventpair` (pair) because this way we can detect error conditions like a
7898/// participant failing mid-create.
7899#[derive(Debug)]
7900pub enum BufferCollectionTokenRequest {
7901    /// Ensure that previous messages have been received server side. This is
7902    /// particularly useful after previous messages that created new tokens,
7903    /// because a token must be known to the sysmem server before sending the
7904    /// token to another participant.
7905    ///
7906    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
7907    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
7908    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
7909    /// to mitigate the possibility of a hostile/fake
7910    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
7911    /// Another way is to pass the token to
7912    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
7913    /// the token as part of exchanging it for a
7914    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
7915    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
7916    /// of stalling.
7917    ///
7918    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
7919    /// and then starting and completing a `Sync`, it's then safe to send the
7920    /// `BufferCollectionToken` client ends to other participants knowing the
7921    /// server will recognize the tokens when they're sent by the other
7922    /// participants to sysmem in a
7923    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
7924    /// efficient way to create tokens while avoiding unnecessary round trips.
7925    ///
7926    /// Other options include waiting for each
7927    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
7928    /// individually (using separate call to `Sync` after each), or calling
7929    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
7930    /// converted to a `BufferCollection` via
7931    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
7932    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
7933    /// the sync step and can create multiple tokens at once.
7934    Sync { responder: BufferCollectionTokenSyncResponder },
7935    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
7936    ///
7937    /// Normally a participant will convert a `BufferCollectionToken` into a
7938    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
7939    /// `Release` via the token (and then close the channel immediately or
7940    /// shortly later in response to server closing the server end), which
7941    /// avoids causing buffer collection failure. Without a prior `Release`,
7942    /// closing the `BufferCollectionToken` client end will cause buffer
7943    /// collection failure.
7944    ///
7945    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
7946    ///
7947    /// By default the server handles unexpected closure of a
7948    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
7949    /// first) by failing the buffer collection. Partly this is to expedite
7950    /// closing VMO handles to reclaim memory when any participant fails. If a
7951    /// participant would like to cleanly close a `BufferCollection` without
7952    /// causing buffer collection failure, the participant can send `Release`
7953    /// before closing the `BufferCollection` client end. The `Release` can
7954    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
7955    /// buffer collection won't require constraints from this node in order to
7956    /// allocate. If after `SetConstraints`, the constraints are retained and
7957    /// aggregated, despite the lack of `BufferCollection` connection at the
7958    /// time of constraints aggregation.
7959    ///
7960    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
7961    ///
7962    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
7963    /// end (without `Release` first) will trigger failure of the buffer
7964    /// collection. To close a `BufferCollectionTokenGroup` channel without
7965    /// failing the buffer collection, ensure that AllChildrenPresent() has been
7966    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
7967    /// client end.
7968    ///
7969    /// If `Release` occurs before
7970    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
7971    /// buffer collection will fail (triggered by reception of `Release` without
7972    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
7973    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
7974    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
7975    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
7976    /// close requires `AllChildrenPresent` (if not already sent), then
7977    /// `Release`, then close client end.
7978    ///
7979    /// If `Release` occurs after `AllChildrenPresent`, the children and all
7980    /// their constraints remain intact (just as they would if the
7981    /// `BufferCollectionTokenGroup` channel had remained open), and the client
7982    /// end close doesn't trigger buffer collection failure.
7983    ///
7984    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
7985    ///
7986    /// For brevity, the per-channel-protocol paragraphs above ignore the
7987    /// separate failure domain created by
7988    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
7989    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
7990    /// unexpectedly closes (without `Release` first) and that client end is
7991    /// under a failure domain, instead of failing the whole buffer collection,
7992    /// the failure domain is failed, but the buffer collection itself is
7993    /// isolated from failure of the failure domain. Such failure domains can be
7994    /// nested, in which case only the inner-most failure domain in which the
7995    /// `Node` resides fails.
7996    Release { control_handle: BufferCollectionTokenControlHandle },
7997    /// Set a name for VMOs in this buffer collection.
7998    ///
7999    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
8000    /// will be truncated to fit. The name of the vmo will be suffixed with the
8001    /// buffer index within the collection (if the suffix fits within
8002    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
8003    /// listed in the inspect data.
8004    ///
8005    /// The name only affects VMOs allocated after the name is set; this call
8006    /// does not rename existing VMOs. If multiple clients set different names
8007    /// then the larger priority value will win. Setting a new name with the
8008    /// same priority as a prior name doesn't change the name.
8009    ///
8010    /// All table fields are currently required.
8011    ///
8012    /// + request `priority` The name is only set if this is the first `SetName`
8013    ///   or if `priority` is greater than any previous `priority` value in
8014    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
8015    /// + request `name` The name for VMOs created under this buffer collection.
8016    SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenControlHandle },
8017    /// Set information about the current client that can be used by sysmem to
8018    /// help diagnose leaking memory and allocation stalls waiting for a
8019    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
8020    ///
8021    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
8022    /// `Node`(s) derived from this `Node`, unless overridden by
8023    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
8024    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
8025    ///
8026    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
8027    /// `Allocator` is the most efficient way to ensure that all
8028    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
8029    /// set, and is also more efficient than separately sending the same debug
8030    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
8031    /// created [`fuchsia.sysmem2/Node`].
8032    ///
8033    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
8034    /// indicate which client is closing their channel first, leading to subtree
8035    /// failure (which can be normal if the purpose of the subtree is over, but
8036    /// if happening earlier than expected, the client-channel-specific name can
8037    /// help diagnose where the failure is first coming from, from sysmem's
8038    /// point of view).
8039    ///
8040    /// All table fields are currently required.
8041    ///
8042    /// + request `name` This can be an arbitrary string, but the current
8043    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
8044    /// + request `id` This can be an arbitrary id, but the current process ID
8045    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
8046    SetDebugClientInfo {
8047        payload: NodeSetDebugClientInfoRequest,
8048        control_handle: BufferCollectionTokenControlHandle,
8049    },
8050    /// Sysmem logs a warning if sysmem hasn't seen
8051    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
8052    /// within 5 seconds after creation of a new collection.
8053    ///
8054    /// Clients can call this method to change when the log is printed. If
8055    /// multiple clients set the deadline, it's unspecified which deadline will
8056    /// take effect.
8057    ///
8058    /// In most cases the default works well.
8059    ///
8060    /// All table fields are currently required.
8061    ///
8062    /// + request `deadline` The time at which sysmem will start trying to log
8063    ///   the warning, unless all constraints are with sysmem by then.
8064    SetDebugTimeoutLogDeadline {
8065        payload: NodeSetDebugTimeoutLogDeadlineRequest,
8066        control_handle: BufferCollectionTokenControlHandle,
8067    },
8068    /// This enables verbose logging for the buffer collection.
8069    ///
8070    /// Verbose logging includes constraints set via
8071    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
8072    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
8073    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
8074    /// the tree of `Node`(s).
8075    ///
8076    /// Normally sysmem prints only a single line complaint when aggregation
8077    /// fails, with just the specific detailed reason that aggregation failed,
8078    /// with little surrounding context.  While this is often enough to diagnose
8079    /// a problem if only a small change was made and everything was working
8080    /// before the small change, it's often not particularly helpful for getting
8081    /// a new buffer collection to work for the first time.  Especially with
8082    /// more complex trees of nodes, involving things like
8083    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
8084    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
8085    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
8086    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
8087    /// looks like and why it's failing a logical allocation, or why a tree or
8088    /// subtree is failing sooner than expected.
8089    ///
8090    /// The intent of the extra logging is to be acceptable from a performance
8091    /// point of view, under the assumption that verbose logging is only enabled
8092    /// on a low number of buffer collections. If we're not tracking down a bug,
8093    /// we shouldn't send this message.
8094    SetVerboseLogging { control_handle: BufferCollectionTokenControlHandle },
8095    /// This gets a handle that can be used as a parameter to
8096    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
8097    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
8098    /// client obtained this handle from this `Node`.
8099    ///
8100    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
8101    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
8102    /// despite the two calls typically being on different channels.
8103    ///
8104    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
8105    ///
8106    /// All table fields are currently required.
8107    ///
8108    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
8109    ///   different `Node` channel, to prove that the client obtained the handle
8110    ///   from this `Node`.
8111    GetNodeRef { responder: BufferCollectionTokenGetNodeRefResponder },
8112    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
8113    /// rooted at a different child token of a common parent
8114    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
8115    /// passed-in `node_ref`.
8116    ///
8117    /// This call is for assisting with admission control de-duplication, and
8118    /// with debugging.
8119    ///
8120    /// The `node_ref` must be obtained using
8121    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
8122    ///
8123    /// The `node_ref` can be a duplicated handle; it's not necessary to call
8124    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
8125    ///
8126    /// If a calling token may not actually be a valid token at all due to a
8127    /// potentially hostile/untrusted provider of the token, call
8128    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8129    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
8130    /// never responds due to a calling token not being a real token (not really
8131    /// talking to sysmem).  Another option is to call
8132    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
8133    /// which also validates the token along with converting it to a
8134    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
8135    ///
8136    /// All table fields are currently required.
8137    ///
8138    /// - response `is_alternate`
8139    ///   - true: The first parent node in common between the calling node and
8140    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
8141    ///     that the calling `Node` and the `node_ref` `Node` will not have both
8142    ///     their constraints apply - rather sysmem will choose one or the other
8143    ///     of the constraints - never both.  This is because only one child of
8144    ///     a `BufferCollectionTokenGroup` is selected during logical
8145    ///     allocation, with only that one child's subtree contributing to
8146    ///     constraints aggregation.
8147    ///   - false: The first parent node in common between the calling `Node`
8148    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
8149    ///     Currently, this means the first parent node in common is a
8150    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
8151    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
8152    ///     `Node` may have both their constraints apply during constraints
8153    ///     aggregation of the logical allocation, if both `Node`(s) are
8154    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
8155    ///     this case, there is no `BufferCollectionTokenGroup` that will
8156    ///     directly prevent the two `Node`(s) from both being selected and
8157    ///     their constraints both aggregated, but even when false, one or both
8158    ///     `Node`(s) may still be eliminated from consideration if one or both
8159    ///     `Node`(s) has a direct or indirect parent
8160    ///     `BufferCollectionTokenGroup` which selects a child subtree other
8161    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
8162    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
8163    ///   associated with the same buffer collection as the calling `Node`.
8164    ///   Another reason for this error is if the `node_ref` is an
8165    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
8166    ///   a real `node_ref` obtained from `GetNodeRef`.
8167    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
8168    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
8169    ///   the needed rights expected on a real `node_ref`.
8170    /// * No other failing status codes are returned by this call.  However,
8171    ///   sysmem may add additional codes in future, so the client should have
8172    ///   sensible default handling for any failing status code.
8173    IsAlternateFor {
8174        payload: NodeIsAlternateForRequest,
8175        responder: BufferCollectionTokenIsAlternateForResponder,
8176    },
8177    /// Get the buffer collection ID. This ID is also available from
8178    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
8179    /// within the collection).
8180    ///
8181    /// This call is mainly useful in situations where we can't convey a
8182    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
8183    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
8184    /// handle, which can be joined back up with a `BufferCollection` client end
8185    /// that was created via a different path. Prefer to convey a
8186    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
8187    ///
8188    /// Trusting a `buffer_collection_id` value from a source other than sysmem
8189    /// is analogous to trusting a koid value from a source other than zircon.
8190    /// Both should be avoided unless really necessary, and both require
8191    /// caution. In some situations it may be reasonable to refer to a
8192    /// pre-established `BufferCollection` by `buffer_collection_id` via a
8193    /// protocol for efficiency reasons, but an incoming value purporting to be
8194    /// a `buffer_collection_id` is not sufficient alone to justify granting the
8195    /// sender of the `buffer_collection_id` any capability. The sender must
8196    /// first prove to a receiver that the sender has/had a VMO or has/had a
8197    /// `BufferCollectionToken` to the same collection by sending a handle that
8198    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
8199    /// `buffer_collection_id` value. The receiver should take care to avoid
8200    /// assuming that a sender had a `BufferCollectionToken` in cases where the
8201    /// sender has only proven that the sender had a VMO.
8202    ///
8203    /// - response `buffer_collection_id` This ID is unique per buffer
8204    ///   collection per boot. Each buffer is uniquely identified by the
8205    ///   `buffer_collection_id` and `buffer_index` together.
8206    GetBufferCollectionId { responder: BufferCollectionTokenGetBufferCollectionIdResponder },
8207    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
8208    /// created after this message to weak, which means that a client's `Node`
8209    /// client end (or a child created after this message) is not alone
8210    /// sufficient to keep allocated VMOs alive.
8211    ///
8212    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
8213    /// `close_weak_asap`.
8214    ///
8215    /// This message is only permitted before the `Node` becomes ready for
8216    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
8217    ///   * `BufferCollectionToken`: any time
8218    ///   * `BufferCollection`: before `SetConstraints`
8219    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
8220    ///
8221    /// Currently, no conversion from strong `Node` to weak `Node` after ready
8222    /// for allocation is provided, but a client can simulate that by creating
8223    /// an additional `Node` before allocation and setting that additional
8224    /// `Node` to weak, and then potentially at some point later sending
8225    /// `Release` and closing the client end of the client's strong `Node`, but
8226    /// keeping the client's weak `Node`.
8227    ///
8228    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
8229    /// collection failure (all `Node` client end(s) will see
8230    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
8231    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
8232    /// this situation until all `Node`(s) are ready for allocation. For initial
8233    /// allocation to succeed, at least one strong `Node` is required to exist
8234    /// at allocation time, but after that client receives VMO handles, that
8235    /// client can `BufferCollection.Release` and close the client end without
8236    /// causing this type of failure.
8237    ///
8238    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
8239    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
8240    /// separately as appropriate.
8241    SetWeak { control_handle: BufferCollectionTokenControlHandle },
8242    /// This indicates to sysmem that the client is prepared to pay attention to
8243    /// `close_weak_asap`.
8244    ///
8245    /// If sent, this message must be before
8246    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
8247    ///
8248    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
8249    /// send this message before `WaitForAllBuffersAllocated`, or a parent
8250    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
8251    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
8252    /// trigger buffer collection failure.
8253    ///
8254    /// This message is necessary because weak sysmem VMOs have not always been
8255    /// a thing, so older clients are not aware of the need to pay attention to
8256    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
8257    /// sysmem weak VMO handles asap. By having this message and requiring
8258    /// participants to indicate their acceptance of this aspect of the overall
8259    /// protocol, we avoid situations where an older client is delivered a weak
8260    /// VMO without any way for sysmem to get that VMO to close quickly later
8261    /// (and on a per-buffer basis).
8262    ///
8263    /// A participant that doesn't handle `close_weak_asap` and also doesn't
8264    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
8265    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
8266    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
8267    /// same participant has a child/delegate which does retrieve VMOs, that
8268    /// child/delegate will need to send `SetWeakOk` before
8269    /// `WaitForAllBuffersAllocated`.
8270    ///
8271    /// + request `for_child_nodes_also` If present and true, this means direct
8272    ///   child nodes of this node created after this message plus all
8273    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
8274    ///   those nodes. Any child node of this node that was created before this
8275    ///   message is not included. This setting is "sticky" in the sense that a
8276    ///   subsequent `SetWeakOk` without this bool set to true does not reset
8277    ///   the server-side bool. If this creates a problem for a participant, a
8278    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
8279    ///   tokens instead, as appropriate. A participant should only set
8280    ///   `for_child_nodes_also` true if the participant can really promise to
8281    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
8282    ///   weak VMO handles held by participants holding the corresponding child
8283    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
8284    ///   which are using sysmem(1) can be weak, despite the clients of those
8285    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
8286    ///   direct way to find out about `close_weak_asap`. This only applies to
8287    ///   descendants of this `Node` which are using sysmem(1), not to this
8288    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
8289    ///   token, which will fail allocation unless an ancestor of this `Node`
8290    ///   specified `for_child_nodes_also` true.
8291    SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionTokenControlHandle },
8292    /// The server_end will be closed after this `Node` and any child nodes have
8293    /// released their buffer counts, making those counts available for
8294    /// reservation by a different `Node` via
8295    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
8296    ///
8297    /// The `Node` buffer counts may not be released until the entire tree of
8298    /// `Node`(s) is closed or failed, because
8299    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
8300    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
8301    /// `Node` buffer counts remain reserved until the orphaned node is later
8302    /// cleaned up.
8303    ///
8304    /// If the `Node` exceeds a fairly large number of attached eventpair server
8305    /// ends, a log message will indicate this and the `Node` (and the
8306    /// appropriate) sub-tree will fail.
8307    ///
8308    /// The `server_end` will remain open when
8309    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
8310    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
8311    /// [`fuchsia.sysmem2/BufferCollection`].
8312    ///
8313    /// This message can also be used with a
8314    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
8315    AttachNodeTracking {
8316        payload: NodeAttachNodeTrackingRequest,
8317        control_handle: BufferCollectionTokenControlHandle,
8318    },
8319    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
8320    /// one, referring to the same buffer collection.
8321    ///
8322    /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
8324    ///
8325    /// This method can be used to add more participants, by transferring the
8326    /// newly created tokens to additional participants.
8327    ///
8328    /// A new token will be returned for each entry in the
8329    /// `rights_attenuation_masks` array.
8330    ///
8331    /// If the called token may not actually be a valid token due to a
8332    /// potentially hostile/untrusted provider of the token, consider using
8333    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8334    /// instead of potentially getting stuck indefinitely if
8335    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
8336    /// due to the calling token not being a real token.
8337    ///
8338    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
8339    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
8340    /// method, because the sync step is included in this call, at the cost of a
8341    /// round trip during this call.
8342    ///
8343    /// All tokens must be turned in to sysmem via
8344    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8345    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8346    /// successfully allocate buffers (or to logically allocate buffers in the
8347    /// case of subtrees involving
8348    /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
8349    ///
8350    /// All table fields are currently required.
8351    ///
8352    /// + request `rights_attenuation_mask` In each entry of
8353    ///   `rights_attenuation_masks`, rights bits that are zero will be absent
8354    ///   in the buffer VMO rights obtainable via the corresponding returned
8355    ///   token. This allows an initiator or intermediary participant to
8356    ///   attenuate the rights available to a participant. This does not allow a
8357    ///   participant to gain rights that the participant doesn't already have.
8358    ///   The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
8359    ///   attenuation should be applied.
8360    /// - response `tokens` The client ends of each newly created token.
8361    DuplicateSync {
8362        payload: BufferCollectionTokenDuplicateSyncRequest,
8363        responder: BufferCollectionTokenDuplicateSyncResponder,
8364    },
8365    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
8366    /// one, referring to the same buffer collection.
8367    ///
8368    /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
8370    ///
8371    /// This method can be used to add a participant, by transferring the newly
8372    /// created token to another participant.
8373    ///
8374    /// This one-way message can be used instead of the two-way
8375    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
8377    /// sysmem to respond to
8378    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
8379    /// client code isn't structured to make it easy to duplicate all the needed
8380    /// tokens at once.
8381    ///
8382    /// After sending one or more `Duplicate` messages, and before sending the
8383    /// newly created child tokens to other participants (or to other
8384    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
8385    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
8386    /// `Sync` call can be made on the token, or on the `BufferCollection`
8387    /// obtained by passing this token to `BindSharedCollection`.  Either will
8388    /// ensure that the server knows about the tokens created via `Duplicate`
8389    /// before the other participant sends the token to the server via separate
8390    /// `Allocator` channel.
8391    ///
8392    /// All tokens must be turned in via
8393    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8394    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8395    /// successfully allocate buffers.
8396    ///
8397    /// All table fields are currently required.
8398    ///
8399    /// + request `rights_attenuation_mask` The rights bits that are zero in
8400    ///   this mask will be absent in the buffer VMO rights obtainable via the
8401    ///   client end of `token_request`. This allows an initiator or
8402    ///   intermediary participant to attenuate the rights available to a
8403    ///   delegate participant. This does not allow a participant to gain rights
8404    ///   that the participant doesn't already have. The value
8405    ///   `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
8406    ///   should be applied.
8407    ///   + These values for rights_attenuation_mask result in no attenuation:
8408    ///     + `ZX_RIGHT_SAME_RIGHTS` (preferred)
8409    ///     + 0xFFFFFFFF (this is reasonable when an attenuation mask is
8410    ///       computed)
8411    ///     + 0 (deprecated - do not use 0 - an ERROR will go to the log)
8412    /// + request `token_request` is the server end of a `BufferCollectionToken`
8413    ///   channel. The client end of this channel acts as another participant in
8414    ///   the shared buffer collection.
8415    Duplicate {
8416        payload: BufferCollectionTokenDuplicateRequest,
8417        control_handle: BufferCollectionTokenControlHandle,
8418    },
8419    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
8420    ///
8421    /// When the `BufferCollectionToken` is converted to a
8422    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
8423    /// the `BufferCollection` also.
8424    ///
8425    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
8426    /// client end without having sent
8427    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
8429    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
8430    /// to the root `Node`, which fails the whole buffer collection. In
8431    /// contrast, a dispensable `Node` can fail after buffers are allocated
8432    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
8434    ///
8435    /// The dispensable `Node` participates in constraints aggregation along
8436    /// with its parent before buffer allocation. If the dispensable `Node`
8437    /// fails before buffers are allocated, the failure propagates to the
8438    /// dispensable `Node`'s parent.
8439    ///
8440    /// After buffers are allocated, failure of the dispensable `Node` (or any
8441    /// child of the dispensable `Node`) does not propagate to the dispensable
8442    /// `Node`'s parent. Failure does propagate from a normal child of a
8443    /// dispensable `Node` to the dispensable `Node`.  Failure of a child is
8444    /// blocked from reaching its parent if the child is attached using
8445    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
8446    /// dispensable and the failure occurred after allocation.
8447    ///
8448    /// A dispensable `Node` can be used in cases where a participant needs to
8449    /// provide constraints, but after buffers are allocated, the participant
8450    /// can fail without causing buffer collection failure from the parent
8451    /// `Node`'s point of view.
8452    ///
8453    /// In contrast, `BufferCollection.AttachToken` can be used to create a
8454    /// `BufferCollectionToken` which does not participate in constraints
8455    /// aggregation with its parent `Node`, and whose failure at any time does
8456    /// not propagate to its parent `Node`, and whose potential delay providing
8457    /// constraints does not prevent the parent `Node` from completing its
8458    /// buffer allocation.
8459    ///
8460    /// An initiator (creator of the root `Node` using
8461    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
8462    /// scenarios choose to initially use a dispensable `Node` for a first
8463    /// instance of a participant, and then later if the first instance of that
    /// participant fails, a new second instance of that participant may be given
8465    /// a `BufferCollectionToken` created with `AttachToken`.
8466    ///
8467    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
8468    /// shortly before sending the dispensable `BufferCollectionToken` to a
8469    /// delegate participant. Because `SetDispensable` prevents propagation of
8470    /// child `Node` failure to parent `Node`(s), if the client was relying on
8471    /// noticing child failure via failure of the parent `Node` retained by the
8472    /// client, the client may instead need to notice failure via other means.
8473    /// If other means aren't available/convenient, the client can instead
8474    /// retain the dispensable `Node` and create a child `Node` under that to
8475    /// send to the delegate participant, retaining this `Node` in order to
8476    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
8477    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
8478    /// (e.g. starting a new instance of the delegate participant and handing it
8479    /// a `BufferCollectionToken` created using
8480    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
8481    /// and clean up in a client-specific way).
8482    ///
8483    /// While it is possible (and potentially useful) to `SetDispensable` on a
8484    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
8485    /// to later replace a failed dispensable `Node` that was a direct child of
8486    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
8487    /// (since there's no `AttachToken` on a group). Instead, to enable
8488    /// `AttachToken` replacement in this case, create an additional
8489    /// non-dispensable token that's a direct child of the group and make the
8490    /// existing dispensable token a child of the additional token.  This way,
8491    /// the additional token that is a direct child of the group has
8492    /// `BufferCollection.AttachToken` which can be used to replace the failed
8493    /// dispensable token.
8494    ///
8495    /// `SetDispensable` on an already-dispensable token is idempotent.
8496    SetDispensable { control_handle: BufferCollectionTokenControlHandle },
8497    /// Create a logical OR among a set of tokens, called a
8498    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
8499    ///
8500    /// Most sysmem clients and many participants don't need to care about this
8501    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
8502    /// a participant wants to attempt to include one set of delegate
8503    /// participants, but if constraints don't combine successfully that way,
8504    /// fall back to a different (possibly overlapping) set of delegate
8505    /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
8507    /// across all involved delegate participants). In such cases, a
8508    /// `BufferCollectionTokenGroup` is useful.
8509    ///
8510    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
8511    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s).  The child tokens
8512    /// which are not selected during aggregation will fail (close), which a
8513    /// potential participant should notice when their `BufferCollection`
8514    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
8515    /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
8517    /// allocate a logical buffer collection or later async failure of a buffer
8518    /// collection).
8519    ///
8520    /// See comments on protocol `BufferCollectionTokenGroup`.
8521    ///
8522    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
8523    /// applied to the whole group can be achieved with a
8524    /// `BufferCollectionToken` for this purpose as a direct parent of the
8525    /// `BufferCollectionTokenGroup`.
8526    ///
8527    /// All table fields are currently required.
8528    ///
8529    /// + request `group_request` The server end of a
8530    ///   `BufferCollectionTokenGroup` channel to be served by sysmem.
8531    CreateBufferCollectionTokenGroup {
8532        payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8533        control_handle: BufferCollectionTokenControlHandle,
8534    },
8535    /// An interaction was received which does not match any known method.
8536    #[non_exhaustive]
8537    _UnknownMethod {
8538        /// Ordinal of the method that was called.
8539        ordinal: u64,
8540        control_handle: BufferCollectionTokenControlHandle,
8541        method_type: fidl::MethodType,
8542    },
8543}
8544
8545impl BufferCollectionTokenRequest {
8546    #[allow(irrefutable_let_patterns)]
8547    pub fn into_sync(self) -> Option<(BufferCollectionTokenSyncResponder)> {
8548        if let BufferCollectionTokenRequest::Sync { responder } = self {
8549            Some((responder))
8550        } else {
8551            None
8552        }
8553    }
8554
8555    #[allow(irrefutable_let_patterns)]
8556    pub fn into_release(self) -> Option<(BufferCollectionTokenControlHandle)> {
8557        if let BufferCollectionTokenRequest::Release { control_handle } = self {
8558            Some((control_handle))
8559        } else {
8560            None
8561        }
8562    }
8563
8564    #[allow(irrefutable_let_patterns)]
8565    pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionTokenControlHandle)> {
8566        if let BufferCollectionTokenRequest::SetName { payload, control_handle } = self {
8567            Some((payload, control_handle))
8568        } else {
8569            None
8570        }
8571    }
8572
8573    #[allow(irrefutable_let_patterns)]
8574    pub fn into_set_debug_client_info(
8575        self,
8576    ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenControlHandle)> {
8577        if let BufferCollectionTokenRequest::SetDebugClientInfo { payload, control_handle } = self {
8578            Some((payload, control_handle))
8579        } else {
8580            None
8581        }
8582    }
8583
8584    #[allow(irrefutable_let_patterns)]
8585    pub fn into_set_debug_timeout_log_deadline(
8586        self,
8587    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenControlHandle)> {
8588        if let BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {
8589            payload,
8590            control_handle,
8591        } = self
8592        {
8593            Some((payload, control_handle))
8594        } else {
8595            None
8596        }
8597    }
8598
8599    #[allow(irrefutable_let_patterns)]
8600    pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenControlHandle)> {
8601        if let BufferCollectionTokenRequest::SetVerboseLogging { control_handle } = self {
8602            Some((control_handle))
8603        } else {
8604            None
8605        }
8606    }
8607
8608    #[allow(irrefutable_let_patterns)]
8609    pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGetNodeRefResponder)> {
8610        if let BufferCollectionTokenRequest::GetNodeRef { responder } = self {
8611            Some((responder))
8612        } else {
8613            None
8614        }
8615    }
8616
8617    #[allow(irrefutable_let_patterns)]
8618    pub fn into_is_alternate_for(
8619        self,
8620    ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenIsAlternateForResponder)> {
8621        if let BufferCollectionTokenRequest::IsAlternateFor { payload, responder } = self {
8622            Some((payload, responder))
8623        } else {
8624            None
8625        }
8626    }
8627
8628    #[allow(irrefutable_let_patterns)]
8629    pub fn into_get_buffer_collection_id(
8630        self,
8631    ) -> Option<(BufferCollectionTokenGetBufferCollectionIdResponder)> {
8632        if let BufferCollectionTokenRequest::GetBufferCollectionId { responder } = self {
8633            Some((responder))
8634        } else {
8635            None
8636        }
8637    }
8638
8639    #[allow(irrefutable_let_patterns)]
8640    pub fn into_set_weak(self) -> Option<(BufferCollectionTokenControlHandle)> {
8641        if let BufferCollectionTokenRequest::SetWeak { control_handle } = self {
8642            Some((control_handle))
8643        } else {
8644            None
8645        }
8646    }
8647
8648    #[allow(irrefutable_let_patterns)]
8649    pub fn into_set_weak_ok(
8650        self,
8651    ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenControlHandle)> {
8652        if let BufferCollectionTokenRequest::SetWeakOk { payload, control_handle } = self {
8653            Some((payload, control_handle))
8654        } else {
8655            None
8656        }
8657    }
8658
8659    #[allow(irrefutable_let_patterns)]
8660    pub fn into_attach_node_tracking(
8661        self,
8662    ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenControlHandle)> {
8663        if let BufferCollectionTokenRequest::AttachNodeTracking { payload, control_handle } = self {
8664            Some((payload, control_handle))
8665        } else {
8666            None
8667        }
8668    }
8669
8670    #[allow(irrefutable_let_patterns)]
8671    pub fn into_duplicate_sync(
8672        self,
8673    ) -> Option<(
8674        BufferCollectionTokenDuplicateSyncRequest,
8675        BufferCollectionTokenDuplicateSyncResponder,
8676    )> {
8677        if let BufferCollectionTokenRequest::DuplicateSync { payload, responder } = self {
8678            Some((payload, responder))
8679        } else {
8680            None
8681        }
8682    }
8683
8684    #[allow(irrefutable_let_patterns)]
8685    pub fn into_duplicate(
8686        self,
8687    ) -> Option<(BufferCollectionTokenDuplicateRequest, BufferCollectionTokenControlHandle)> {
8688        if let BufferCollectionTokenRequest::Duplicate { payload, control_handle } = self {
8689            Some((payload, control_handle))
8690        } else {
8691            None
8692        }
8693    }
8694
8695    #[allow(irrefutable_let_patterns)]
8696    pub fn into_set_dispensable(self) -> Option<(BufferCollectionTokenControlHandle)> {
8697        if let BufferCollectionTokenRequest::SetDispensable { control_handle } = self {
8698            Some((control_handle))
8699        } else {
8700            None
8701        }
8702    }
8703
8704    #[allow(irrefutable_let_patterns)]
8705    pub fn into_create_buffer_collection_token_group(
8706        self,
8707    ) -> Option<(
8708        BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8709        BufferCollectionTokenControlHandle,
8710    )> {
8711        if let BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {
8712            payload,
8713            control_handle,
8714        } = self
8715        {
8716            Some((payload, control_handle))
8717        } else {
8718            None
8719        }
8720    }
8721
8722    /// Name of the method defined in FIDL
8723    pub fn method_name(&self) -> &'static str {
8724        match *self {
8725            BufferCollectionTokenRequest::Sync { .. } => "sync",
8726            BufferCollectionTokenRequest::Release { .. } => "release",
8727            BufferCollectionTokenRequest::SetName { .. } => "set_name",
8728            BufferCollectionTokenRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
8729            BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline { .. } => {
8730                "set_debug_timeout_log_deadline"
8731            }
8732            BufferCollectionTokenRequest::SetVerboseLogging { .. } => "set_verbose_logging",
8733            BufferCollectionTokenRequest::GetNodeRef { .. } => "get_node_ref",
8734            BufferCollectionTokenRequest::IsAlternateFor { .. } => "is_alternate_for",
8735            BufferCollectionTokenRequest::GetBufferCollectionId { .. } => {
8736                "get_buffer_collection_id"
8737            }
8738            BufferCollectionTokenRequest::SetWeak { .. } => "set_weak",
8739            BufferCollectionTokenRequest::SetWeakOk { .. } => "set_weak_ok",
8740            BufferCollectionTokenRequest::AttachNodeTracking { .. } => "attach_node_tracking",
8741            BufferCollectionTokenRequest::DuplicateSync { .. } => "duplicate_sync",
8742            BufferCollectionTokenRequest::Duplicate { .. } => "duplicate",
8743            BufferCollectionTokenRequest::SetDispensable { .. } => "set_dispensable",
8744            BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup { .. } => {
8745                "create_buffer_collection_token_group"
8746            }
8747            BufferCollectionTokenRequest::_UnknownMethod {
8748                method_type: fidl::MethodType::OneWay,
8749                ..
8750            } => "unknown one-way method",
8751            BufferCollectionTokenRequest::_UnknownMethod {
8752                method_type: fidl::MethodType::TwoWay,
8753                ..
8754            } => "unknown two-way method",
8755        }
8756    }
8757}
8758
/// Cheaply cloneable handle to the server end of a `BufferCollectionToken`
/// channel, used to shut the connection down (optionally with an epitaph) or
/// signal the peer, independently of any particular in-flight request.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenControlHandle {
    // Shared connection state; `Arc` so responders and the request stream can
    // all reference the same underlying channel.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
8763
// Every method here is a thin delegation to the shared `ServeInner` state.
impl fidl::endpoints::ControlHandle for BufferCollectionTokenControlHandle {
    // Close the channel without sending an epitaph.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    // Close the channel, sending `status` to the client as an epitaph first.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    // Whether the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    // Signal future that resolves when the underlying channel closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Clear/set zircon signals on the peer endpoint. Zircon signals only exist
    // on Fuchsia, hence the cfg gate.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}

// No protocol-specific control-handle methods (e.g. event senders) exist for
// `BufferCollectionToken`; fidlgen emits the inherent impl even when empty.
impl BufferCollectionTokenControlHandle {}
8791
/// Responder for an in-flight two-way `Sync` call; must be used to reply
/// (or explicitly discarded) or the channel will be shut down on drop.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenSyncResponder {
    // Wrapped in `ManuallyDrop` so `drop_without_shutdown` can release the
    // handle without running this type's channel-closing `Drop`.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the pending `Sync` call; echoed back in the response.
    tx_id: u32,
}

/// Sets the channel to shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenSyncResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8824
8825impl BufferCollectionTokenSyncResponder {
8826    /// Sends a response to the FIDL transaction.
8827    ///
8828    /// Sets the channel to shutdown if an error occurs.
8829    pub fn send(self) -> Result<(), fidl::Error> {
8830        let _result = self.send_raw();
8831        if _result.is_err() {
8832            self.control_handle.shutdown();
8833        }
8834        self.drop_without_shutdown();
8835        _result
8836    }
8837
8838    /// Similar to "send" but does not shutdown the channel if an error occurs.
8839    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
8840        let _result = self.send_raw();
8841        self.drop_without_shutdown();
8842        _result
8843    }
8844
8845    fn send_raw(&self) -> Result<(), fidl::Error> {
8846        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
8847            fidl::encoding::Flexible::new(()),
8848            self.tx_id,
8849            0x11ac2555cf575b54,
8850            fidl::encoding::DynamicFlags::FLEXIBLE,
8851        )
8852    }
8853}
8854
/// Responder for an in-flight two-way `GetNodeRef` call; must be used to reply
/// (or explicitly discarded) or the channel will be shut down on drop.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGetNodeRefResponder {
    // Wrapped in `ManuallyDrop` so `drop_without_shutdown` can release the
    // handle without running this type's channel-closing `Drop`.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the pending `GetNodeRef` call; echoed in the response.
    tx_id: u32,
}

/// Sets the channel to shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenGetNodeRefResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8887
8888impl BufferCollectionTokenGetNodeRefResponder {
8889    /// Sends a response to the FIDL transaction.
8890    ///
8891    /// Sets the channel to shutdown if an error occurs.
8892    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8893        let _result = self.send_raw(payload);
8894        if _result.is_err() {
8895            self.control_handle.shutdown();
8896        }
8897        self.drop_without_shutdown();
8898        _result
8899    }
8900
8901    /// Similar to "send" but does not shutdown the channel if an error occurs.
8902    pub fn send_no_shutdown_on_err(
8903        self,
8904        mut payload: NodeGetNodeRefResponse,
8905    ) -> Result<(), fidl::Error> {
8906        let _result = self.send_raw(payload);
8907        self.drop_without_shutdown();
8908        _result
8909    }
8910
8911    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8912        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
8913            fidl::encoding::Flexible::new(&mut payload),
8914            self.tx_id,
8915            0x5b3d0e51614df053,
8916            fidl::encoding::DynamicFlags::FLEXIBLE,
8917        )
8918    }
8919}
8920
/// Responder for an in-flight two-way `IsAlternateFor` call; must be used to
/// reply (or explicitly discarded) or the channel will be shut down on drop.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenIsAlternateForResponder {
    // Wrapped in `ManuallyDrop` so `drop_without_shutdown` can release the
    // handle without running this type's channel-closing `Drop`.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the pending `IsAlternateFor` call; echoed in the response.
    tx_id: u32,
}

/// Sets the channel to shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenIsAlternateForResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8953
8954impl BufferCollectionTokenIsAlternateForResponder {
8955    /// Sends a response to the FIDL transaction.
8956    ///
8957    /// Sets the channel to shutdown if an error occurs.
8958    pub fn send(
8959        self,
8960        mut result: Result<&NodeIsAlternateForResponse, Error>,
8961    ) -> Result<(), fidl::Error> {
8962        let _result = self.send_raw(result);
8963        if _result.is_err() {
8964            self.control_handle.shutdown();
8965        }
8966        self.drop_without_shutdown();
8967        _result
8968    }
8969
8970    /// Similar to "send" but does not shutdown the channel if an error occurs.
8971    pub fn send_no_shutdown_on_err(
8972        self,
8973        mut result: Result<&NodeIsAlternateForResponse, Error>,
8974    ) -> Result<(), fidl::Error> {
8975        let _result = self.send_raw(result);
8976        self.drop_without_shutdown();
8977        _result
8978    }
8979
8980    fn send_raw(
8981        &self,
8982        mut result: Result<&NodeIsAlternateForResponse, Error>,
8983    ) -> Result<(), fidl::Error> {
8984        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
8985            NodeIsAlternateForResponse,
8986            Error,
8987        >>(
8988            fidl::encoding::FlexibleResult::new(result),
8989            self.tx_id,
8990            0x3a58e00157e0825,
8991            fidl::encoding::DynamicFlags::FLEXIBLE,
8992        )
8993    }
8994}
8995
/// Responder for an in-flight two-way `GetBufferCollectionId` call; must be
/// used to reply (or explicitly discarded) or the channel is shut down on drop.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGetBufferCollectionIdResponder {
    // Wrapped in `ManuallyDrop` so `drop_without_shutdown` can release the
    // handle without running this type's channel-closing `Drop`.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the pending `GetBufferCollectionId` call; echoed in the response.
    tx_id: u32,
}

/// Sets the channel to shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
9028
9029impl BufferCollectionTokenGetBufferCollectionIdResponder {
9030    /// Sends a response to the FIDL transaction.
9031    ///
9032    /// Sets the channel to shutdown if an error occurs.
9033    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
9034        let _result = self.send_raw(payload);
9035        if _result.is_err() {
9036            self.control_handle.shutdown();
9037        }
9038        self.drop_without_shutdown();
9039        _result
9040    }
9041
9042    /// Similar to "send" but does not shutdown the channel if an error occurs.
9043    pub fn send_no_shutdown_on_err(
9044        self,
9045        mut payload: &NodeGetBufferCollectionIdResponse,
9046    ) -> Result<(), fidl::Error> {
9047        let _result = self.send_raw(payload);
9048        self.drop_without_shutdown();
9049        _result
9050    }
9051
9052    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
9053        self.control_handle
9054            .inner
9055            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
9056                fidl::encoding::Flexible::new(payload),
9057                self.tx_id,
9058                0x77d19a494b78ba8c,
9059                fidl::encoding::DynamicFlags::FLEXIBLE,
9060            )
9061    }
9062}
9063
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
/// Responder for the two-way `BufferCollectionToken.DuplicateSync` FIDL
/// method. Must be used to reply (or explicitly dropped via
/// `drop_without_shutdown`); dropping it otherwise shuts down the channel.
pub struct BufferCollectionTokenDuplicateSyncResponder {
    // Wrapped in ManuallyDrop so both Drop and drop_without_shutdown can each
    // release the handle exactly once along their respective paths.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id from the request; passed back in the reply header by send_raw.
    tx_id: u32,
}
9070
/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenDuplicateSyncResponder {
    fn drop(&mut self) {
        // No response was sent: shut the channel down so the client is not
        // left waiting on a reply that will never arrive.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
9081
impl fidl::endpoints::Responder for BufferCollectionTokenDuplicateSyncResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    /// Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the shutdown-on-drop
    /// behavior of the `Drop` impl.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
9096
9097impl BufferCollectionTokenDuplicateSyncResponder {
9098    /// Sends a response to the FIDL transaction.
9099    ///
9100    /// Sets the channel to shutdown if an error occurs.
9101    pub fn send(
9102        self,
9103        mut payload: BufferCollectionTokenDuplicateSyncResponse,
9104    ) -> Result<(), fidl::Error> {
9105        let _result = self.send_raw(payload);
9106        if _result.is_err() {
9107            self.control_handle.shutdown();
9108        }
9109        self.drop_without_shutdown();
9110        _result
9111    }
9112
9113    /// Similar to "send" but does not shutdown the channel if an error occurs.
9114    pub fn send_no_shutdown_on_err(
9115        self,
9116        mut payload: BufferCollectionTokenDuplicateSyncResponse,
9117    ) -> Result<(), fidl::Error> {
9118        let _result = self.send_raw(payload);
9119        self.drop_without_shutdown();
9120        _result
9121    }
9122
9123    fn send_raw(
9124        &self,
9125        mut payload: BufferCollectionTokenDuplicateSyncResponse,
9126    ) -> Result<(), fidl::Error> {
9127        self.control_handle.inner.send::<fidl::encoding::FlexibleType<
9128            BufferCollectionTokenDuplicateSyncResponse,
9129        >>(
9130            fidl::encoding::Flexible::new(&mut payload),
9131            self.tx_id,
9132            0x1c1af9919d1ca45c,
9133            fidl::encoding::DynamicFlags::FLEXIBLE,
9134        )
9135    }
9136}
9137
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
/// Zero-sized marker type identifying the `BufferCollectionTokenGroup`
/// protocol; ties together the proxy, request-stream, and synchronous-proxy
/// types via the `ProtocolMarker` impl below.
pub struct BufferCollectionTokenGroupMarker;
9140
impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenGroupMarker {
    type Proxy = BufferCollectionTokenGroupProxy;
    type RequestStream = BufferCollectionTokenGroupRequestStream;
    // Synchronous proxies require zircon channels, so they only exist on-target.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionTokenGroupSynchronousProxy;

    // "(anonymous)" because this protocol has no discoverable service name;
    // the string is used for debugging/error messages.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionTokenGroup";
}
9149
/// Client-side interface for the `BufferCollectionTokenGroup` protocol.
///
/// One-way methods return `Result<(), fidl::Error>` immediately; two-way
/// methods return an associated `…ResponseFut` future that resolves with the
/// decoded response. Method names use raw identifiers (`r#…`) so generated
/// names can never collide with Rust keywords.
pub trait BufferCollectionTokenGroupProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    fn r#create_child(
        &self,
        payload: BufferCollectionTokenGroupCreateChildRequest,
    ) -> Result<(), fidl::Error>;
    type CreateChildrenSyncResponseFut: std::future::Future<
            Output = Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error>,
        > + Send;
    fn r#create_children_sync(
        &self,
        payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
    ) -> Self::CreateChildrenSyncResponseFut;
    fn r#all_children_present(&self) -> Result<(), fidl::Error>;
}
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
/// Blocking (synchronous) client for the `BufferCollectionTokenGroup`
/// protocol; only available on Fuchsia targets since it wraps a raw zircon
/// channel client.
pub struct BufferCollectionTokenGroupSynchronousProxy {
    client: fidl::client::sync::Client,
}
9200
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenGroupSynchronousProxy {
    type Proxy = BufferCollectionTokenGroupProxy;
    type Protocol = BufferCollectionTokenGroupMarker;

    /// Wraps a raw channel; delegates to the inherent `new` constructor.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    /// Consumes the proxy and returns the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
9218
9219#[cfg(target_os = "fuchsia")]
9220impl BufferCollectionTokenGroupSynchronousProxy {
9221    pub fn new(channel: fidl::Channel) -> Self {
9222        let protocol_name =
9223            <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
9224        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
9225    }
9226
9227    pub fn into_channel(self) -> fidl::Channel {
9228        self.client.into_channel()
9229    }
9230
9231    /// Waits until an event arrives and returns it. It is safe for other
9232    /// threads to make concurrent requests while waiting for an event.
9233    pub fn wait_for_event(
9234        &self,
9235        deadline: zx::MonotonicInstant,
9236    ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
9237        BufferCollectionTokenGroupEvent::decode(self.client.wait_for_event(deadline)?)
9238    }
9239
9240    /// Ensure that previous messages have been received server side. This is
9241    /// particularly useful after previous messages that created new tokens,
9242    /// because a token must be known to the sysmem server before sending the
9243    /// token to another participant.
9244    ///
9245    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9246    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9247    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9248    /// to mitigate the possibility of a hostile/fake
9249    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9250    /// Another way is to pass the token to
9251    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
9252    /// the token as part of exchanging it for a
9253    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9254    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9255    /// of stalling.
9256    ///
9257    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9258    /// and then starting and completing a `Sync`, it's then safe to send the
9259    /// `BufferCollectionToken` client ends to other participants knowing the
9260    /// server will recognize the tokens when they're sent by the other
9261    /// participants to sysmem in a
9262    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
9263    /// efficient way to create tokens while avoiding unnecessary round trips.
9264    ///
9265    /// Other options include waiting for each
9266    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
9267    /// individually (using separate call to `Sync` after each), or calling
9268    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
9269    /// converted to a `BufferCollection` via
9270    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
9271    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
9272    /// the sync step and can create multiple tokens at once.
9273    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
9274        let _response = self.client.send_query::<
9275            fidl::encoding::EmptyPayload,
9276            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
9277        >(
9278            (),
9279            0x11ac2555cf575b54,
9280            fidl::encoding::DynamicFlags::FLEXIBLE,
9281            ___deadline,
9282        )?
9283        .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
9284        Ok(_response)
9285    }
9286
9287    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
9288    ///
9289    /// Normally a participant will convert a `BufferCollectionToken` into a
9290    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
9291    /// `Release` via the token (and then close the channel immediately or
9292    /// shortly later in response to server closing the server end), which
9293    /// avoids causing buffer collection failure. Without a prior `Release`,
9294    /// closing the `BufferCollectionToken` client end will cause buffer
9295    /// collection failure.
9296    ///
9297    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
9298    ///
9299    /// By default the server handles unexpected closure of a
9300    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
9301    /// first) by failing the buffer collection. Partly this is to expedite
9302    /// closing VMO handles to reclaim memory when any participant fails. If a
9303    /// participant would like to cleanly close a `BufferCollection` without
9304    /// causing buffer collection failure, the participant can send `Release`
9305    /// before closing the `BufferCollection` client end. The `Release` can
9306    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
9307    /// buffer collection won't require constraints from this node in order to
9308    /// allocate. If after `SetConstraints`, the constraints are retained and
9309    /// aggregated, despite the lack of `BufferCollection` connection at the
9310    /// time of constraints aggregation.
9311    ///
9312    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
9313    ///
9314    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
9315    /// end (without `Release` first) will trigger failure of the buffer
9316    /// collection. To close a `BufferCollectionTokenGroup` channel without
9317    /// failing the buffer collection, ensure that AllChildrenPresent() has been
9318    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
9319    /// client end.
9320    ///
9321    /// If `Release` occurs before
9322    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
9323    /// buffer collection will fail (triggered by reception of `Release` without
9324    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
9325    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
9326    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
9327    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
9328    /// close requires `AllChildrenPresent` (if not already sent), then
9329    /// `Release`, then close client end.
9330    ///
9331    /// If `Release` occurs after `AllChildrenPresent`, the children and all
9332    /// their constraints remain intact (just as they would if the
9333    /// `BufferCollectionTokenGroup` channel had remained open), and the client
9334    /// end close doesn't trigger buffer collection failure.
9335    ///
9336    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
9337    ///
9338    /// For brevity, the per-channel-protocol paragraphs above ignore the
9339    /// separate failure domain created by
9340    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
9341    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
9342    /// unexpectedly closes (without `Release` first) and that client end is
9343    /// under a failure domain, instead of failing the whole buffer collection,
9344    /// the failure domain is failed, but the buffer collection itself is
9345    /// isolated from failure of the failure domain. Such failure domains can be
9346    /// nested, in which case only the inner-most failure domain in which the
9347    /// `Node` resides fails.
9348    pub fn r#release(&self) -> Result<(), fidl::Error> {
9349        self.client.send::<fidl::encoding::EmptyPayload>(
9350            (),
9351            0x6a5cae7d6d6e04c6,
9352            fidl::encoding::DynamicFlags::FLEXIBLE,
9353        )
9354    }
9355
9356    /// Set a name for VMOs in this buffer collection.
9357    ///
9358    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
9359    /// will be truncated to fit. The name of the vmo will be suffixed with the
9360    /// buffer index within the collection (if the suffix fits within
9361    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
9362    /// listed in the inspect data.
9363    ///
9364    /// The name only affects VMOs allocated after the name is set; this call
9365    /// does not rename existing VMOs. If multiple clients set different names
9366    /// then the larger priority value will win. Setting a new name with the
9367    /// same priority as a prior name doesn't change the name.
9368    ///
9369    /// All table fields are currently required.
9370    ///
9371    /// + request `priority` The name is only set if this is the first `SetName`
9372    ///   or if `priority` is greater than any previous `priority` value in
9373    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
9374    /// + request `name` The name for VMOs created under this buffer collection.
9375    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
9376        self.client.send::<NodeSetNameRequest>(
9377            payload,
9378            0xb41f1624f48c1e9,
9379            fidl::encoding::DynamicFlags::FLEXIBLE,
9380        )
9381    }
9382
9383    /// Set information about the current client that can be used by sysmem to
9384    /// help diagnose leaking memory and allocation stalls waiting for a
9385    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
9386    ///
9387    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
9388    /// `Node`(s) derived from this `Node`, unless overriden by
9389    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
9390    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
9391    ///
9392    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
9393    /// `Allocator` is the most efficient way to ensure that all
9394    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
9395    /// set, and is also more efficient than separately sending the same debug
9396    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
9397    /// created [`fuchsia.sysmem2/Node`].
9398    ///
9399    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
9400    /// indicate which client is closing their channel first, leading to subtree
9401    /// failure (which can be normal if the purpose of the subtree is over, but
9402    /// if happening earlier than expected, the client-channel-specific name can
9403    /// help diagnose where the failure is first coming from, from sysmem's
9404    /// point of view).
9405    ///
9406    /// All table fields are currently required.
9407    ///
9408    /// + request `name` This can be an arbitrary string, but the current
9409    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
9410    /// + request `id` This can be an arbitrary id, but the current process ID
9411    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
9412    pub fn r#set_debug_client_info(
9413        &self,
9414        mut payload: &NodeSetDebugClientInfoRequest,
9415    ) -> Result<(), fidl::Error> {
9416        self.client.send::<NodeSetDebugClientInfoRequest>(
9417            payload,
9418            0x5cde8914608d99b1,
9419            fidl::encoding::DynamicFlags::FLEXIBLE,
9420        )
9421    }
9422
9423    /// Sysmem logs a warning if sysmem hasn't seen
9424    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
9425    /// within 5 seconds after creation of a new collection.
9426    ///
9427    /// Clients can call this method to change when the log is printed. If
9428    /// multiple client set the deadline, it's unspecified which deadline will
9429    /// take effect.
9430    ///
9431    /// In most cases the default works well.
9432    ///
9433    /// All table fields are currently required.
9434    ///
9435    /// + request `deadline` The time at which sysmem will start trying to log
9436    ///   the warning, unless all constraints are with sysmem by then.
9437    pub fn r#set_debug_timeout_log_deadline(
9438        &self,
9439        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
9440    ) -> Result<(), fidl::Error> {
9441        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
9442            payload,
9443            0x716b0af13d5c0806,
9444            fidl::encoding::DynamicFlags::FLEXIBLE,
9445        )
9446    }
9447
9448    /// This enables verbose logging for the buffer collection.
9449    ///
9450    /// Verbose logging includes constraints set via
9451    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
9452    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
9453    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
9454    /// the tree of `Node`(s).
9455    ///
9456    /// Normally sysmem prints only a single line complaint when aggregation
9457    /// fails, with just the specific detailed reason that aggregation failed,
9458    /// with little surrounding context.  While this is often enough to diagnose
9459    /// a problem if only a small change was made and everything was working
9460    /// before the small change, it's often not particularly helpful for getting
9461    /// a new buffer collection to work for the first time.  Especially with
9462    /// more complex trees of nodes, involving things like
9463    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
9464    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
9465    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
9466    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
9467    /// looks like and why it's failing a logical allocation, or why a tree or
9468    /// subtree is failing sooner than expected.
9469    ///
9470    /// The intent of the extra logging is to be acceptable from a performance
9471    /// point of view, under the assumption that verbose logging is only enabled
9472    /// on a low number of buffer collections. If we're not tracking down a bug,
9473    /// we shouldn't send this message.
9474    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
9475        self.client.send::<fidl::encoding::EmptyPayload>(
9476            (),
9477            0x5209c77415b4dfad,
9478            fidl::encoding::DynamicFlags::FLEXIBLE,
9479        )
9480    }
9481
9482    /// This gets a handle that can be used as a parameter to
9483    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
9484    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
9485    /// client obtained this handle from this `Node`.
9486    ///
9487    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
9488    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
9489    /// despite the two calls typically being on different channels.
9490    ///
9491    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
9492    ///
9493    /// All table fields are currently required.
9494    ///
9495    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
9496    ///   different `Node` channel, to prove that the client obtained the handle
9497    ///   from this `Node`.
9498    pub fn r#get_node_ref(
9499        &self,
9500        ___deadline: zx::MonotonicInstant,
9501    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
9502        let _response = self.client.send_query::<
9503            fidl::encoding::EmptyPayload,
9504            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
9505        >(
9506            (),
9507            0x5b3d0e51614df053,
9508            fidl::encoding::DynamicFlags::FLEXIBLE,
9509            ___deadline,
9510        )?
9511        .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
9512        Ok(_response)
9513    }
9514
9515    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
9516    /// rooted at a different child token of a common parent
9517    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
9518    /// passed-in `node_ref`.
9519    ///
9520    /// This call is for assisting with admission control de-duplication, and
9521    /// with debugging.
9522    ///
9523    /// The `node_ref` must be obtained using
9524    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
9525    ///
9526    /// The `node_ref` can be a duplicated handle; it's not necessary to call
9527    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
9528    ///
9529    /// If a calling token may not actually be a valid token at all due to a
9530    /// potentially hostile/untrusted provider of the token, call
9531    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
9532    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
9533    /// never responds due to a calling token not being a real token (not really
9534    /// talking to sysmem).  Another option is to call
9535    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
9536    /// which also validates the token along with converting it to a
9537    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
9538    ///
9539    /// All table fields are currently required.
9540    ///
9541    /// - response `is_alternate`
9542    ///   - true: The first parent node in common between the calling node and
9543    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
9544    ///     that the calling `Node` and the `node_ref` `Node` will not have both
9545    ///     their constraints apply - rather sysmem will choose one or the other
9546    ///     of the constraints - never both.  This is because only one child of
9547    ///     a `BufferCollectionTokenGroup` is selected during logical
9548    ///     allocation, with only that one child's subtree contributing to
9549    ///     constraints aggregation.
9550    ///   - false: The first parent node in common between the calling `Node`
9551    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
9552    ///     Currently, this means the first parent node in common is a
9553    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
9554    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
9555    ///     `Node` may have both their constraints apply during constraints
9556    ///     aggregation of the logical allocation, if both `Node`(s) are
9557    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
9558    ///     this case, there is no `BufferCollectionTokenGroup` that will
9559    ///     directly prevent the two `Node`(s) from both being selected and
9560    ///     their constraints both aggregated, but even when false, one or both
9561    ///     `Node`(s) may still be eliminated from consideration if one or both
9562    ///     `Node`(s) has a direct or indirect parent
9563    ///     `BufferCollectionTokenGroup` which selects a child subtree other
9564    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
9565    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
9566    ///   associated with the same buffer collection as the calling `Node`.
9567    ///   Another reason for this error is if the `node_ref` is an
9568    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
9569    ///   a real `node_ref` obtained from `GetNodeRef`.
9570    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
9571    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle , or doesn't have
9572    ///   the needed rights expected on a real `node_ref`.
9573    /// * No other failing status codes are returned by this call.  However,
9574    ///   sysmem may add additional codes in future, so the client should have
9575    ///   sensible default handling for any failing status code.
9576    pub fn r#is_alternate_for(
9577        &self,
9578        mut payload: NodeIsAlternateForRequest,
9579        ___deadline: zx::MonotonicInstant,
9580    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
9581        let _response = self.client.send_query::<
9582            NodeIsAlternateForRequest,
9583            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
9584        >(
9585            &mut payload,
9586            0x3a58e00157e0825,
9587            fidl::encoding::DynamicFlags::FLEXIBLE,
9588            ___deadline,
9589        )?
9590        .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
9591        Ok(_response.map(|x| x))
9592    }
9593
9594    /// Get the buffer collection ID. This ID is also available from
9595    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
9596    /// within the collection).
9597    ///
9598    /// This call is mainly useful in situations where we can't convey a
9599    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
9600    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
9601    /// handle, which can be joined back up with a `BufferCollection` client end
9602    /// that was created via a different path. Prefer to convey a
9603    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
9604    ///
9605    /// Trusting a `buffer_collection_id` value from a source other than sysmem
9606    /// is analogous to trusting a koid value from a source other than zircon.
9607    /// Both should be avoided unless really necessary, and both require
9608    /// caution. In some situations it may be reasonable to refer to a
9609    /// pre-established `BufferCollection` by `buffer_collection_id` via a
9610    /// protocol for efficiency reasons, but an incoming value purporting to be
9611    /// a `buffer_collection_id` is not sufficient alone to justify granting the
9612    /// sender of the `buffer_collection_id` any capability. The sender must
9613    /// first prove to a receiver that the sender has/had a VMO or has/had a
9614    /// `BufferCollectionToken` to the same collection by sending a handle that
9615    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
9616    /// `buffer_collection_id` value. The receiver should take care to avoid
9617    /// assuming that a sender had a `BufferCollectionToken` in cases where the
9618    /// sender has only proven that the sender had a VMO.
9619    ///
9620    /// - response `buffer_collection_id` This ID is unique per buffer
9621    ///   collection per boot. Each buffer is uniquely identified by the
9622    ///   `buffer_collection_id` and `buffer_index` together.
9623    pub fn r#get_buffer_collection_id(
9624        &self,
9625        ___deadline: zx::MonotonicInstant,
9626    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
9627        let _response = self.client.send_query::<
9628            fidl::encoding::EmptyPayload,
9629            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
9630        >(
9631            (),
9632            0x77d19a494b78ba8c,
9633            fidl::encoding::DynamicFlags::FLEXIBLE,
9634            ___deadline,
9635        )?
9636        .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
9637        Ok(_response)
9638    }
9639
9640    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
9641    /// created after this message to weak, which means that a client's `Node`
9642    /// client end (or a child created after this message) is not alone
9643    /// sufficient to keep allocated VMOs alive.
9644    ///
9645    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
9646    /// `close_weak_asap`.
9647    ///
9648    /// This message is only permitted before the `Node` becomes ready for
9649    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
9650    ///   * `BufferCollectionToken`: any time
9651    ///   * `BufferCollection`: before `SetConstraints`
9652    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
9653    ///
9654    /// Currently, no conversion from strong `Node` to weak `Node` after ready
9655    /// for allocation is provided, but a client can simulate that by creating
9656    /// an additional `Node` before allocation and setting that additional
9657    /// `Node` to weak, and then potentially at some point later sending
9658    /// `Release` and closing the client end of the client's strong `Node`, but
9659    /// keeping the client's weak `Node`.
9660    ///
9661    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
9662    /// collection failure (all `Node` client end(s) will see
9663    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
9664    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
9665    /// this situation until all `Node`(s) are ready for allocation. For initial
9666    /// allocation to succeed, at least one strong `Node` is required to exist
9667    /// at allocation time, but after that client receives VMO handles, that
9668    /// client can `BufferCollection.Release` and close the client end without
9669    /// causing this type of failure.
9670    ///
9671    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
9672    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
9673    /// separately as appropriate.
9674    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
9675        self.client.send::<fidl::encoding::EmptyPayload>(
9676            (),
9677            0x22dd3ea514eeffe1,
9678            fidl::encoding::DynamicFlags::FLEXIBLE,
9679        )
9680    }
9681
9682    /// This indicates to sysmem that the client is prepared to pay attention to
9683    /// `close_weak_asap`.
9684    ///
9685    /// If sent, this message must be before
9686    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
9687    ///
9688    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
9689    /// send this message before `WaitForAllBuffersAllocated`, or a parent
9690    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
9691    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
9692    /// trigger buffer collection failure.
9693    ///
9694    /// This message is necessary because weak sysmem VMOs have not always been
9695    /// a thing, so older clients are not aware of the need to pay attention to
9696    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
9697    /// sysmem weak VMO handles asap. By having this message and requiring
9698    /// participants to indicate their acceptance of this aspect of the overall
9699    /// protocol, we avoid situations where an older client is delivered a weak
9700    /// VMO without any way for sysmem to get that VMO to close quickly later
9701    /// (and on a per-buffer basis).
9702    ///
9703    /// A participant that doesn't handle `close_weak_asap` and also doesn't
9704    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
9705    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
9706    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
9707    /// same participant has a child/delegate which does retrieve VMOs, that
9708    /// child/delegate will need to send `SetWeakOk` before
9709    /// `WaitForAllBuffersAllocated`.
9710    ///
9711    /// + request `for_child_nodes_also` If present and true, this means direct
9712    ///   child nodes of this node created after this message plus all
9713    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
9714    ///   those nodes. Any child node of this node that was created before this
9715    ///   message is not included. This setting is "sticky" in the sense that a
9716    ///   subsequent `SetWeakOk` without this bool set to true does not reset
9717    ///   the server-side bool. If this creates a problem for a participant, a
9718    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
9719    ///   tokens instead, as appropriate. A participant should only set
9720    ///   `for_child_nodes_also` true if the participant can really promise to
9721    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
9722    ///   weak VMO handles held by participants holding the corresponding child
9723    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
9724    ///   which are using sysmem(1) can be weak, despite the clients of those
9725    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
9726    ///   direct way to find out about `close_weak_asap`. This only applies to
9727    ///   descendents of this `Node` which are using sysmem(1), not to this
9728    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
9729    ///   token, which will fail allocation unless an ancestor of this `Node`
9730    ///   specified `for_child_nodes_also` true.
9731    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
9732        self.client.send::<NodeSetWeakOkRequest>(
9733            &mut payload,
9734            0x38a44fc4d7724be9,
9735            fidl::encoding::DynamicFlags::FLEXIBLE,
9736        )
9737    }
9738
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
9741    /// reservation by a different `Node` via
9742    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
9743    ///
9744    /// The `Node` buffer counts may not be released until the entire tree of
9745    /// `Node`(s) is closed or failed, because
9746    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
9747    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
9748    /// `Node` buffer counts remain reserved until the orphaned node is later
9749    /// cleaned up.
9750    ///
9751    /// If the `Node` exceeds a fairly large number of attached eventpair server
9752    /// ends, a log message will indicate this and the `Node` (and the
9753    /// appropriate) sub-tree will fail.
9754    ///
9755    /// The `server_end` will remain open when
9756    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
9757    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
9758    /// [`fuchsia.sysmem2/BufferCollection`].
9759    ///
9760    /// This message can also be used with a
9761    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
9762    pub fn r#attach_node_tracking(
9763        &self,
9764        mut payload: NodeAttachNodeTrackingRequest,
9765    ) -> Result<(), fidl::Error> {
9766        self.client.send::<NodeAttachNodeTrackingRequest>(
9767            &mut payload,
9768            0x3f22f2a293d3cdac,
9769            fidl::encoding::DynamicFlags::FLEXIBLE,
9770        )
9771    }
9772
9773    /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
9774    /// (including its children) will be selected during allocation (or logical
9775    /// allocation).
9776    ///
9777    /// Before passing the client end of this token to
9778    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
9779    /// [`fuchsia.sysmem2/Node.Sync`] after
9780    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
9781    /// Or the client can use
9782    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
9783    /// essentially includes the `Sync`.
9784    ///
9785    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9786    /// fail the group's subtree and close the connection.
9787    ///
9788    /// After all children have been created, send AllChildrenPresent.
9789    ///
9790    /// + request `token_request` The server end of the new token channel.
9791    /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
9792    ///   token allows the holder to get the same rights to buffers as the
9793    ///   parent token (of the group) had. When the value isn't
    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
    ///   bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
9796    ///   for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
9797    ///   causes subtree failure.
9798    pub fn r#create_child(
9799        &self,
9800        mut payload: BufferCollectionTokenGroupCreateChildRequest,
9801    ) -> Result<(), fidl::Error> {
9802        self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
9803            &mut payload,
9804            0x41a0075d419f30c5,
9805            fidl::encoding::DynamicFlags::FLEXIBLE,
9806        )
9807    }
9808
9809    /// Create 1 or more child tokens at once, synchronously.  In contrast to
9810    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
9811    /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
9812    /// of a returned token to
9813    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
9814    ///
9815    /// The lower-index child tokens are higher priority (attempted sooner) than
9816    /// higher-index child tokens.
9817    ///
9818    /// As per all child tokens, successful aggregation will choose exactly one
9819    /// child among all created children (across all children created across
9820    /// potentially multiple calls to
9821    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
9822    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
9823    ///
9824    /// The maximum permissible total number of children per group, and total
9825    /// number of nodes in an overall tree (from the root) are capped to limits
9826    /// which are not configurable via these protocols.
9827    ///
9828    /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
9829    /// this will fail the group's subtree and close the connection.
9830    ///
9831    /// After all children have been created, send AllChildrenPresent.
9832    ///
9833    /// + request `rights_attentuation_masks` The size of the
9834    ///   `rights_attentuation_masks` determines the number of created child
9835    ///   tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
9836    ///   The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
9837    ///   other value, each 0 bit in the mask attenuates that right.
9838    /// - response `tokens` The created child tokens.
9839    pub fn r#create_children_sync(
9840        &self,
9841        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
9842        ___deadline: zx::MonotonicInstant,
9843    ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
9844        let _response = self.client.send_query::<
9845            BufferCollectionTokenGroupCreateChildrenSyncRequest,
9846            fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
9847        >(
9848            payload,
9849            0x15dea448c536070a,
9850            fidl::encoding::DynamicFlags::FLEXIBLE,
9851            ___deadline,
9852        )?
9853        .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
9854        Ok(_response)
9855    }
9856
9857    /// Indicate that no more children will be created.
9858    ///
9859    /// After creating all children, the client should send
9860    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
9861    /// inform sysmem that no more children will be created, so that sysmem can
9862    /// know when it's ok to start aggregating constraints.
9863    ///
9864    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9865    /// fail the group's subtree and close the connection.
9866    ///
9867    /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
9868    /// after `AllChildrenPresent`, else failure of the group's subtree will be
9869    /// triggered. This is intentionally not analogous to how `Release` without
9870    /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
9871    /// subtree failure.
9872    pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
9873        self.client.send::<fidl::encoding::EmptyPayload>(
9874            (),
9875            0x5c327e4a23391312,
9876            fidl::encoding::DynamicFlags::FLEXIBLE,
9877        )
9878    }
9879}
9880
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionTokenGroupSynchronousProxy> for zx::Handle {
    /// Consumes the proxy, surrendering its channel as a generic handle.
    fn from(value: BufferCollectionTokenGroupSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
9887
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionTokenGroupSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy for this protocol.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionTokenGroupSynchronousProxy::new(value)
    }
}
9894
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionTokenGroupSynchronousProxy {
    type Protocol = BufferCollectionTokenGroupMarker;

    /// Builds a synchronous proxy from a typed client endpoint by unwrapping
    /// the underlying channel.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionTokenGroupMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
9903
/// Asynchronous client proxy for the `fuchsia.sysmem2/BufferCollectionTokenGroup`
/// protocol.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenGroupProxy {
    // Async FIDL client that encodes/sends requests and decodes responses
    // on the underlying channel.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
9908
9909impl fidl::endpoints::Proxy for BufferCollectionTokenGroupProxy {
9910    type Protocol = BufferCollectionTokenGroupMarker;
9911
9912    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
9913        Self::new(inner)
9914    }
9915
9916    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
9917        self.client.into_channel().map_err(|client| Self { client })
9918    }
9919
9920    fn as_channel(&self) -> &::fidl::AsyncChannel {
9921        self.client.as_channel()
9922    }
9923}
9924
9925impl BufferCollectionTokenGroupProxy {
9926    /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionTokenGroup.
9927    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
9928        let protocol_name =
9929            <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
9930        Self { client: fidl::client::Client::new(channel, protocol_name) }
9931    }
9932
9933    /// Get a Stream of events from the remote end of the protocol.
9934    ///
9935    /// # Panics
9936    ///
9937    /// Panics if the event stream was already taken.
9938    pub fn take_event_stream(&self) -> BufferCollectionTokenGroupEventStream {
9939        BufferCollectionTokenGroupEventStream { event_receiver: self.client.take_event_receiver() }
9940    }
9941
9942    /// Ensure that previous messages have been received server side. This is
9943    /// particularly useful after previous messages that created new tokens,
9944    /// because a token must be known to the sysmem server before sending the
9945    /// token to another participant.
9946    ///
9947    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9948    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9949    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9950    /// to mitigate the possibility of a hostile/fake
9951    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9952    /// Another way is to pass the token to
9953    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
9954    /// the token as part of exchanging it for a
9955    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9956    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9957    /// of stalling.
9958    ///
9959    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9960    /// and then starting and completing a `Sync`, it's then safe to send the
9961    /// `BufferCollectionToken` client ends to other participants knowing the
9962    /// server will recognize the tokens when they're sent by the other
9963    /// participants to sysmem in a
9964    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
9965    /// efficient way to create tokens while avoiding unnecessary round trips.
9966    ///
9967    /// Other options include waiting for each
9968    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
9969    /// individually (using separate call to `Sync` after each), or calling
9970    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
9971    /// converted to a `BufferCollection` via
9972    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
9973    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
9974    /// the sync step and can create multiple tokens at once.
9975    pub fn r#sync(
9976        &self,
9977    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
9978        BufferCollectionTokenGroupProxyInterface::r#sync(self)
9979    }
9980
9981    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
9982    ///
9983    /// Normally a participant will convert a `BufferCollectionToken` into a
9984    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
9985    /// `Release` via the token (and then close the channel immediately or
9986    /// shortly later in response to server closing the server end), which
9987    /// avoids causing buffer collection failure. Without a prior `Release`,
9988    /// closing the `BufferCollectionToken` client end will cause buffer
9989    /// collection failure.
9990    ///
9991    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
9992    ///
9993    /// By default the server handles unexpected closure of a
9994    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
9995    /// first) by failing the buffer collection. Partly this is to expedite
9996    /// closing VMO handles to reclaim memory when any participant fails. If a
9997    /// participant would like to cleanly close a `BufferCollection` without
9998    /// causing buffer collection failure, the participant can send `Release`
9999    /// before closing the `BufferCollection` client end. The `Release` can
10000    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
10001    /// buffer collection won't require constraints from this node in order to
10002    /// allocate. If after `SetConstraints`, the constraints are retained and
10003    /// aggregated, despite the lack of `BufferCollection` connection at the
10004    /// time of constraints aggregation.
10005    ///
10006    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
10007    ///
10008    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
10009    /// end (without `Release` first) will trigger failure of the buffer
10010    /// collection. To close a `BufferCollectionTokenGroup` channel without
10011    /// failing the buffer collection, ensure that AllChildrenPresent() has been
10012    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
10013    /// client end.
10014    ///
10015    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
10017    /// buffer collection will fail (triggered by reception of `Release` without
10018    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
10019    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
10020    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
10021    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
10022    /// close requires `AllChildrenPresent` (if not already sent), then
10023    /// `Release`, then close client end.
10024    ///
10025    /// If `Release` occurs after `AllChildrenPresent`, the children and all
10026    /// their constraints remain intact (just as they would if the
10027    /// `BufferCollectionTokenGroup` channel had remained open), and the client
10028    /// end close doesn't trigger buffer collection failure.
10029    ///
10030    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
10031    ///
10032    /// For brevity, the per-channel-protocol paragraphs above ignore the
10033    /// separate failure domain created by
10034    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
10035    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
10036    /// unexpectedly closes (without `Release` first) and that client end is
10037    /// under a failure domain, instead of failing the whole buffer collection,
10038    /// the failure domain is failed, but the buffer collection itself is
10039    /// isolated from failure of the failure domain. Such failure domains can be
10040    /// nested, in which case only the inner-most failure domain in which the
10041    /// `Node` resides fails.
10042    pub fn r#release(&self) -> Result<(), fidl::Error> {
10043        BufferCollectionTokenGroupProxyInterface::r#release(self)
10044    }
10045
10046    /// Set a name for VMOs in this buffer collection.
10047    ///
10048    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
10049    /// will be truncated to fit. The name of the vmo will be suffixed with the
10050    /// buffer index within the collection (if the suffix fits within
10051    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
10052    /// listed in the inspect data.
10053    ///
10054    /// The name only affects VMOs allocated after the name is set; this call
10055    /// does not rename existing VMOs. If multiple clients set different names
10056    /// then the larger priority value will win. Setting a new name with the
10057    /// same priority as a prior name doesn't change the name.
10058    ///
10059    /// All table fields are currently required.
10060    ///
10061    /// + request `priority` The name is only set if this is the first `SetName`
10062    ///   or if `priority` is greater than any previous `priority` value in
10063    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
10064    /// + request `name` The name for VMOs created under this buffer collection.
10065    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
10066        BufferCollectionTokenGroupProxyInterface::r#set_name(self, payload)
10067    }
10068
10069    /// Set information about the current client that can be used by sysmem to
10070    /// help diagnose leaking memory and allocation stalls waiting for a
10071    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
10072    ///
10073    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
10075    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
10076    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
10077    ///
10078    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
10079    /// `Allocator` is the most efficient way to ensure that all
10080    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
10081    /// set, and is also more efficient than separately sending the same debug
10082    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
10083    /// created [`fuchsia.sysmem2/Node`].
10084    ///
10085    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
10086    /// indicate which client is closing their channel first, leading to subtree
10087    /// failure (which can be normal if the purpose of the subtree is over, but
10088    /// if happening earlier than expected, the client-channel-specific name can
10089    /// help diagnose where the failure is first coming from, from sysmem's
10090    /// point of view).
10091    ///
10092    /// All table fields are currently required.
10093    ///
10094    /// + request `name` This can be an arbitrary string, but the current
10095    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
10096    /// + request `id` This can be an arbitrary id, but the current process ID
10097    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
10098    pub fn r#set_debug_client_info(
10099        &self,
10100        mut payload: &NodeSetDebugClientInfoRequest,
10101    ) -> Result<(), fidl::Error> {
10102        BufferCollectionTokenGroupProxyInterface::r#set_debug_client_info(self, payload)
10103    }
10104
10105    /// Sysmem logs a warning if sysmem hasn't seen
10106    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
10107    /// within 5 seconds after creation of a new collection.
10108    ///
10109    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
10111    /// take effect.
10112    ///
10113    /// In most cases the default works well.
10114    ///
10115    /// All table fields are currently required.
10116    ///
10117    /// + request `deadline` The time at which sysmem will start trying to log
10118    ///   the warning, unless all constraints are with sysmem by then.
10119    pub fn r#set_debug_timeout_log_deadline(
10120        &self,
10121        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
10122    ) -> Result<(), fidl::Error> {
10123        BufferCollectionTokenGroupProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
10124    }
10125
10126    /// This enables verbose logging for the buffer collection.
10127    ///
10128    /// Verbose logging includes constraints set via
10129    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
10130    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
10131    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
10132    /// the tree of `Node`(s).
10133    ///
10134    /// Normally sysmem prints only a single line complaint when aggregation
10135    /// fails, with just the specific detailed reason that aggregation failed,
10136    /// with little surrounding context.  While this is often enough to diagnose
10137    /// a problem if only a small change was made and everything was working
10138    /// before the small change, it's often not particularly helpful for getting
10139    /// a new buffer collection to work for the first time.  Especially with
10140    /// more complex trees of nodes, involving things like
10141    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
10142    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
10143    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
10144    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
10145    /// looks like and why it's failing a logical allocation, or why a tree or
10146    /// subtree is failing sooner than expected.
10147    ///
10148    /// The intent of the extra logging is to be acceptable from a performance
10149    /// point of view, under the assumption that verbose logging is only enabled
10150    /// on a low number of buffer collections. If we're not tracking down a bug,
10151    /// we shouldn't send this message.
10152    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
10153        BufferCollectionTokenGroupProxyInterface::r#set_verbose_logging(self)
10154    }
10155
10156    /// This gets a handle that can be used as a parameter to
10157    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
10158    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
10159    /// client obtained this handle from this `Node`.
10160    ///
10161    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
10162    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
10163    /// despite the two calls typically being on different channels.
10164    ///
10165    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
10166    ///
10167    /// All table fields are currently required.
10168    ///
10169    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
10170    ///   different `Node` channel, to prove that the client obtained the handle
10171    ///   from this `Node`.
10172    pub fn r#get_node_ref(
10173        &self,
10174    ) -> fidl::client::QueryResponseFut<
10175        NodeGetNodeRefResponse,
10176        fidl::encoding::DefaultFuchsiaResourceDialect,
10177    > {
10178        BufferCollectionTokenGroupProxyInterface::r#get_node_ref(self)
10179    }
10180
10181    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
10182    /// rooted at a different child token of a common parent
10183    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
10184    /// passed-in `node_ref`.
10185    ///
    /// This call is for assisting with admission control de-duplication, and
    /// with debugging.
    ///
    /// The `node_ref` must be obtained using
    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
    ///
    /// The `node_ref` can be a duplicated handle; it's not necessary to call
    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// If a calling token may not actually be a valid token at all due to a
    /// potentially hostile/untrusted provider of the token, call
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
    /// never responds due to a calling token not being a real token (not really
    /// talking to sysmem).  Another option is to call
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
    /// which also validates the token along with converting it to a
    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
    ///
    /// All table fields are currently required.
    ///
    /// - response `is_alternate`
    ///   - true: The first parent node in common between the calling node and
    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
    ///     that the calling `Node` and the `node_ref` `Node` will not have both
    ///     their constraints apply - rather sysmem will choose one or the other
    ///     of the constraints - never both.  This is because only one child of
    ///     a `BufferCollectionTokenGroup` is selected during logical
    ///     allocation, with only that one child's subtree contributing to
    ///     constraints aggregation.
    ///   - false: The first parent node in common between the calling `Node`
    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
    ///     Currently, this means the first parent node in common is a
    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
    ///     `Node` may have both their constraints apply during constraints
    ///     aggregation of the logical allocation, if both `Node`(s) are
    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
    ///     this case, there is no `BufferCollectionTokenGroup` that will
    ///     directly prevent the two `Node`(s) from both being selected and
    ///     their constraints both aggregated, but even when false, one or both
    ///     `Node`(s) may still be eliminated from consideration if one or both
    ///     `Node`(s) has a direct or indirect parent
    ///     `BufferCollectionTokenGroup` which selects a child subtree other
    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
    ///   associated with the same buffer collection as the calling `Node`.
    ///   Another reason for this error is if the `node_ref` is an
    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
    ///   a real `node_ref` obtained from `GetNodeRef`.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
    ///   the needed rights expected on a real `node_ref`.
    /// * No other failing status codes are returned by this call.  However,
    ///   sysmem may add additional codes in future, so the client should have
    ///   sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegates to the `ProxyInterface` impl below, which performs the
        // actual FIDL encode/send/decode.
        BufferCollectionTokenGroupProxyInterface::r#is_alternate_for(self, payload)
    }
10251
    /// Get the buffer collection ID. This ID is also available from
    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
    /// within the collection).
    ///
    /// This call is mainly useful in situations where we can't convey a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
    /// handle, which can be joined back up with a `BufferCollection` client end
    /// that was created via a different path. Prefer to convey a
    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
    ///
    /// Trusting a `buffer_collection_id` value from a source other than sysmem
    /// is analogous to trusting a koid value from a source other than zircon.
    /// Both should be avoided unless really necessary, and both require
    /// caution. In some situations it may be reasonable to refer to a
    /// pre-established `BufferCollection` by `buffer_collection_id` via a
    /// protocol for efficiency reasons, but an incoming value purporting to be
    /// a `buffer_collection_id` is not sufficient alone to justify granting the
    /// sender of the `buffer_collection_id` any capability. The sender must
    /// first prove to a receiver that the sender has/had a VMO or has/had a
    /// `BufferCollectionToken` to the same collection by sending a handle that
    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
    /// `buffer_collection_id` value. The receiver should take care to avoid
    /// assuming that a sender had a `BufferCollectionToken` in cases where the
    /// sender has only proven that the sender had a VMO.
    ///
    /// - response `buffer_collection_id` This ID is unique per buffer
    ///   collection per boot. Each buffer is uniquely identified by the
    ///   `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
    ) -> fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegates to the `ProxyInterface` impl below, which performs the
        // actual FIDL encode/send/decode.
        BufferCollectionTokenGroupProxyInterface::r#get_buffer_collection_id(self)
    }
10289
    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
    /// created after this message to weak, which means that a client's `Node`
    /// client end (or a child created after this message) is not alone
    /// sufficient to keep allocated VMOs alive.
    ///
    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
    /// `close_weak_asap`.
    ///
    /// This message is only permitted before the `Node` becomes ready for
    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
    ///   * `BufferCollectionToken`: any time
    ///   * `BufferCollection`: before `SetConstraints`
    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
    ///
    /// Currently, no conversion from strong `Node` to weak `Node` after ready
    /// for allocation is provided, but a client can simulate that by creating
    /// an additional `Node` before allocation and setting that additional
    /// `Node` to weak, and then potentially at some point later sending
    /// `Release` and closing the client end of the client's strong `Node`, but
    /// keeping the client's weak `Node`.
    ///
    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
    /// collection failure (all `Node` client end(s) will see
    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
    /// this situation until all `Node`(s) are ready for allocation. For initial
    /// allocation to succeed, at least one strong `Node` is required to exist
    /// at allocation time, but after that client receives VMO handles, that
    /// client can `BufferCollection.Release` and close the client end without
    /// causing this type of failure.
    ///
    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
    /// separately as appropriate.
    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
        // One-way message; delegates to the `ProxyInterface` impl below.
        BufferCollectionTokenGroupProxyInterface::r#set_weak(self)
    }
10327
    /// This indicates to sysmem that the client is prepared to pay attention to
    /// `close_weak_asap`.
    ///
    /// If sent, this message must be before
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
    /// send this message before `WaitForAllBuffersAllocated`, or a parent
    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
    /// trigger buffer collection failure.
    ///
    /// This message is necessary because weak sysmem VMOs have not always been
    /// a thing, so older clients are not aware of the need to pay attention to
    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
    /// sysmem weak VMO handles asap. By having this message and requiring
    /// participants to indicate their acceptance of this aspect of the overall
    /// protocol, we avoid situations where an older client is delivered a weak
    /// VMO without any way for sysmem to get that VMO to close quickly later
    /// (and on a per-buffer basis).
    ///
    /// A participant that doesn't handle `close_weak_asap` and also doesn't
    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
    /// same participant has a child/delegate which does retrieve VMOs, that
    /// child/delegate will need to send `SetWeakOk` before
    /// `WaitForAllBuffersAllocated`.
    ///
    /// + request `for_child_nodes_also` If present and true, this means direct
    ///   child nodes of this node created after this message plus all
    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
    ///   those nodes. Any child node of this node that was created before this
    ///   message is not included. This setting is "sticky" in the sense that a
    ///   subsequent `SetWeakOk` without this bool set to true does not reset
    ///   the server-side bool. If this creates a problem for a participant, a
    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
    ///   tokens instead, as appropriate. A participant should only set
    ///   `for_child_nodes_also` true if the participant can really promise to
    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
    ///   weak VMO handles held by participants holding the corresponding child
    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
    ///   which are using sysmem(1) can be weak, despite the clients of those
    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
    ///   direct way to find out about `close_weak_asap`. This only applies to
    ///   descendants of this `Node` which are using sysmem(1), not to this
    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
    ///   token, which will fail allocation unless an ancestor of this `Node`
    ///   specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // One-way message; delegates to the `ProxyInterface` impl below.
        BufferCollectionTokenGroupProxyInterface::r#set_weak_ok(self, payload)
    }
10380
    /// The server_end will be closed after this `Node` and any child nodes have
    /// released their buffer counts, making those counts available for
    /// reservation by a different `Node` via
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
    ///
    /// The `Node` buffer counts may not be released until the entire tree of
    /// `Node`(s) is closed or failed, because
    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
    /// `Node` buffer counts remain reserved until the orphaned node is later
    /// cleaned up.
    ///
    /// If the `Node` exceeds a fairly large number of attached eventpair server
    /// ends, a log message will indicate this and the `Node` (and the
    /// appropriate) sub-tree will fail.
    ///
    /// The `server_end` will remain open when
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// This message can also be used with a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; delegates to the `ProxyInterface` impl below.
        BufferCollectionTokenGroupProxyInterface::r#attach_node_tracking(self, payload)
    }
10410
    /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
    /// (including its children) will be selected during allocation (or logical
    /// allocation).
    ///
    /// Before passing the client end of this token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
    /// [`fuchsia.sysmem2/Node.Sync`] after
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
    /// Or the client can use
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
    /// essentially includes the `Sync`.
    ///
    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
    /// fail the group's subtree and close the connection.
    ///
    /// After all children have been created, send AllChildrenPresent.
    ///
    /// + request `token_request` The server end of the new token channel.
    /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
    ///   token allows the holder to get the same rights to buffers as the
    ///   parent token (of the group) had. When the value isn't
    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
    ///   bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
    ///   for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
    ///   causes subtree failure.
    pub fn r#create_child(
        &self,
        mut payload: BufferCollectionTokenGroupCreateChildRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; delegates to the `ProxyInterface` impl below.
        BufferCollectionTokenGroupProxyInterface::r#create_child(self, payload)
    }
10442
    /// Create 1 or more child tokens at once, synchronously.  In contrast to
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
    /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
    /// of a returned token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// The lower-index child tokens are higher priority (attempted sooner) than
    /// higher-index child tokens.
    ///
    /// As per all child tokens, successful aggregation will choose exactly one
    /// child among all created children (across all children created across
    /// potentially multiple calls to
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
    ///
    /// The maximum permissible total number of children per group, and total
    /// number of nodes in an overall tree (from the root) are capped to limits
    /// which are not configurable via these protocols.
    ///
    /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
    /// this will fail the group's subtree and close the connection.
    ///
    /// After all children have been created, send AllChildrenPresent.
    ///
    /// + request `rights_attentuation_masks` The size of the
    ///   `rights_attentuation_masks` determines the number of created child
    ///   tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
    ///   The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
    ///   other value, each 0 bit in the mask attenuates that right.
    /// - response `tokens` The created child tokens.
    pub fn r#create_children_sync(
        &self,
        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
    ) -> fidl::client::QueryResponseFut<
        BufferCollectionTokenGroupCreateChildrenSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegates to the `ProxyInterface` impl below, which performs the
        // actual FIDL encode/send/decode.
        BufferCollectionTokenGroupProxyInterface::r#create_children_sync(self, payload)
    }
10482
    /// Indicate that no more children will be created.
    ///
    /// After creating all children, the client should send
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
    /// inform sysmem that no more children will be created, so that sysmem can
    /// know when it's ok to start aggregating constraints.
    ///
    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
    /// fail the group's subtree and close the connection.
    ///
    /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
    /// after `AllChildrenPresent`, else failure of the group's subtree will be
    /// triggered. This is intentionally not analogous to how `Release` without
    /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
    /// subtree failure.
    pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
        // One-way message; delegates to the `ProxyInterface` impl below.
        BufferCollectionTokenGroupProxyInterface::r#all_children_present(self)
    }
10501}
10502
// Wire-level client implementation. Every method identifies itself by a
// fidlgen-assigned 64-bit ordinal and is sent with FLEXIBLE dynamic flags.
// Two-way methods carry a nested `_decode` helper that turns the raw reply
// buffer into the typed response; one-way methods just encode and send.
// NOTE(review): the ordinal constants must match the server bindings exactly —
// never hand-edit them.
impl BufferCollectionTokenGroupProxyInterface for BufferCollectionTokenGroupProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the (empty, flexible) `Sync` response body.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way: no response is awaited or decoded.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        // `FlexibleResultType` decodes the method's (response, Error) result
        // union; `into_result` surfaces framework-level errors.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
            // Generated identity mapping of the success value.
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#create_child(
        &self,
        mut payload: BufferCollectionTokenGroupCreateChildRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
            &mut payload,
            0x41a0075d419f30c5,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type CreateChildrenSyncResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionTokenGroupCreateChildrenSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#create_children_sync(
        &self,
        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
    ) -> Self::CreateChildrenSyncResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x15dea448c536070a,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            BufferCollectionTokenGroupCreateChildrenSyncRequest,
            BufferCollectionTokenGroupCreateChildrenSyncResponse,
        >(
            payload,
            0x15dea448c536070a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#all_children_present(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5c327e4a23391312,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
10726
/// A Stream of incoming events for fuchsia.sysmem2/BufferCollectionTokenGroup.
pub struct BufferCollectionTokenGroupEventStream {
    // Raw message buffers from the channel; decoded into
    // `BufferCollectionTokenGroupEvent` values by the `Stream` impl.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
10730
10731impl std::marker::Unpin for BufferCollectionTokenGroupEventStream {}
10732
impl futures::stream::FusedStream for BufferCollectionTokenGroupEventStream {
    // Terminated exactly when the underlying event receiver is terminated.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
10738
10739impl futures::Stream for BufferCollectionTokenGroupEventStream {
10740    type Item = Result<BufferCollectionTokenGroupEvent, fidl::Error>;
10741
10742    fn poll_next(
10743        mut self: std::pin::Pin<&mut Self>,
10744        cx: &mut std::task::Context<'_>,
10745    ) -> std::task::Poll<Option<Self::Item>> {
10746        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
10747            &mut self.event_receiver,
10748            cx
10749        )?) {
10750            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenGroupEvent::decode(buf))),
10751            None => std::task::Poll::Ready(None),
10752        }
10753    }
10754}
10755
#[derive(Debug)]
/// An event received on a fuchsia.sysmem2/BufferCollectionTokenGroup channel.
/// The protocol currently declares no events, so only flexible unknown events
/// are representable.
pub enum BufferCollectionTokenGroupEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
10764
impl BufferCollectionTokenGroupEvent {
    /// Decodes a message buffer as a [`BufferCollectionTokenGroupEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events carry no transaction id; the receiver only routes zero-tx_id
        // messages here.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // Unrecognized but FLEXIBLE-flagged events are surfaced as
            // `_UnknownEvent` rather than failing the stream.
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(BufferCollectionTokenGroupEvent::_UnknownEvent {
                    ordinal: tx_header.ordinal,
                })
            }
            // A strict (non-FLEXIBLE) unknown ordinal is a protocol error.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            })
        }
    }
}
10786
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionTokenGroup.
pub struct BufferCollectionTokenGroupRequestStream {
    // Server state shared with control handles and responders.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel closes or shuts down; polling after this panics.
    is_terminated: bool,
}
10792
10793impl std::marker::Unpin for BufferCollectionTokenGroupRequestStream {}
10794
impl futures::stream::FusedStream for BufferCollectionTokenGroupRequestStream {
    // Mirrors the flag maintained by `poll_next` (set on peer close/shutdown).
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
10800
10801impl fidl::endpoints::RequestStream for BufferCollectionTokenGroupRequestStream {
10802    type Protocol = BufferCollectionTokenGroupMarker;
10803    type ControlHandle = BufferCollectionTokenGroupControlHandle;
10804
10805    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
10806        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
10807    }
10808
10809    fn control_handle(&self) -> Self::ControlHandle {
10810        BufferCollectionTokenGroupControlHandle { inner: self.inner.clone() }
10811    }
10812
10813    fn into_inner(
10814        self,
10815    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
10816    {
10817        (self.inner, self.is_terminated)
10818    }
10819
10820    fn from_inner(
10821        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
10822        is_terminated: bool,
10823    ) -> Self {
10824        Self { inner, is_terminated }
10825    }
10826}
10827
/// Decodes incoming channel messages into `BufferCollectionTokenGroupRequest` items.
impl futures::Stream for BufferCollectionTokenGroupRequestStream {
    type Item = Result<BufferCollectionTokenGroupRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Honor an externally requested shutdown before attempting a channel read;
        // once terminated, the stream yields `None`.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        // Polling a completed stream is a caller bug, consistent with the
        // `futures::Stream` contract of not polling after `None`.
        if this.is_terminated {
            panic!("polled BufferCollectionTokenGroupRequestStream after completion");
        }
        // Decode using thread-local scratch buffers to avoid per-message allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure ends the stream cleanly rather than erroring.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch on the method ordinal; each arm decodes the payload and
                // wraps it in the matching request variant. Two-way methods carry a
                // responder holding the transaction id; one-way methods carry only a
                // control handle.
                std::task::Poll::Ready(Some(match header.ordinal {
                // Sync (two-way, empty payload)
                0x11ac2555cf575b54 => {
                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::Sync {
                        responder: BufferCollectionTokenGroupSyncResponder {
                            control_handle: std::mem::ManuallyDrop::new(control_handle),
                            tx_id: header.tx_id,
                        },
                    })
                }
                // Release (one-way, empty payload)
                0x6a5cae7d6d6e04c6 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::Release {
                        control_handle,
                    })
                }
                // SetName (one-way)
                0xb41f1624f48c1e9 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::SetName {payload: req,
                        control_handle,
                    })
                }
                // SetDebugClientInfo (one-way)
                0x5cde8914608d99b1 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::SetDebugClientInfo {payload: req,
                        control_handle,
                    })
                }
                // SetDebugTimeoutLogDeadline (one-way)
                0x716b0af13d5c0806 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {payload: req,
                        control_handle,
                    })
                }
                // SetVerboseLogging (one-way, empty payload)
                0x5209c77415b4dfad => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::SetVerboseLogging {
                        control_handle,
                    })
                }
                // GetNodeRef (two-way, empty payload)
                0x5b3d0e51614df053 => {
                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::GetNodeRef {
                        responder: BufferCollectionTokenGroupGetNodeRefResponder {
                            control_handle: std::mem::ManuallyDrop::new(control_handle),
                            tx_id: header.tx_id,
                        },
                    })
                }
                // IsAlternateFor (two-way)
                0x3a58e00157e0825 => {
                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                    let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::IsAlternateFor {payload: req,
                        responder: BufferCollectionTokenGroupIsAlternateForResponder {
                            control_handle: std::mem::ManuallyDrop::new(control_handle),
                            tx_id: header.tx_id,
                        },
                    })
                }
                // GetBufferCollectionId (two-way, empty payload)
                0x77d19a494b78ba8c => {
                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::GetBufferCollectionId {
                        responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder {
                            control_handle: std::mem::ManuallyDrop::new(control_handle),
                            tx_id: header.tx_id,
                        },
                    })
                }
                // SetWeak (one-way, empty payload)
                0x22dd3ea514eeffe1 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::SetWeak {
                        control_handle,
                    })
                }
                // SetWeakOk (one-way)
                0x38a44fc4d7724be9 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::SetWeakOk {payload: req,
                        control_handle,
                    })
                }
                // AttachNodeTracking (one-way)
                0x3f22f2a293d3cdac => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::AttachNodeTracking {payload: req,
                        control_handle,
                    })
                }
                // CreateChild (one-way)
                0x41a0075d419f30c5 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::CreateChild {payload: req,
                        control_handle,
                    })
                }
                // CreateChildrenSync (two-way)
                0x15dea448c536070a => {
                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                    let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildrenSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildrenSyncRequest>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::CreateChildrenSync {payload: req,
                        responder: BufferCollectionTokenGroupCreateChildrenSyncResponder {
                            control_handle: std::mem::ManuallyDrop::new(control_handle),
                            tx_id: header.tx_id,
                        },
                    })
                }
                // AllChildrenPresent (one-way, empty payload)
                0x5c327e4a23391312 => {
                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                    let control_handle = BufferCollectionTokenGroupControlHandle {
                        inner: this.inner.clone(),
                    };
                    Ok(BufferCollectionTokenGroupRequest::AllChildrenPresent {
                        control_handle,
                    })
                }
                // Unknown flexible one-way method: surface to the server as _UnknownMethod.
                _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                    Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
                        ordinal: header.ordinal,
                        control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
                        method_type: fidl::MethodType::OneWay,
                    })
                }
                // Unknown flexible two-way method: reply with a framework error,
                // then surface to the server as _UnknownMethod.
                _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                    this.inner.send_framework_err(
                        fidl::encoding::FrameworkErr::UnknownMethod,
                        header.tx_id,
                        header.ordinal,
                        header.dynamic_flags(),
                        (bytes, handles),
                    )?;
                    Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
                        ordinal: header.ordinal,
                        control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
                        method_type: fidl::MethodType::TwoWay,
                    })
                }
                // Unknown strict method: decode error for the caller.
                _ => Err(fidl::Error::UnknownOrdinal {
                    ordinal: header.ordinal,
                    protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                }),
            }))
            },
        )
    }
}
11073
11074/// The sysmem implementation is consistent with a logical / conceptual model of
11075/// allocation / logical allocation as follows:
11076///
11077/// As usual, a logical allocation considers either the root and all nodes with
11078/// connectivity to the root that don't transit a [`fuchsia.sysmem2/Node`]
11079/// created with [`fuchsia.sysmem2/BufferCollection.AttachToken`], or a subtree
11080/// rooted at an `AttachToken` `Node` and all `Node`(s) with connectivity to
11081/// that subtree that don't transit another `AttachToken`.  This is called the
11082/// logical allocation pruned subtree, or pruned subtree for short.
11083///
11084/// During constraints aggregation, each
11085/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] will select a single child
11086/// `Node` among its direct children. The rest of the children will appear to
11087/// fail the logical allocation, while the selected child may succeed.
11088///
11089/// When more than one `BufferCollectionTokenGroup` exists in the overall
11090/// logical allocation pruned subtree, the relative priority between two groups
11091/// is equivalent to their ordering in a DFS pre-order iteration of the tree,
11092/// with parents higher priority than children, and left children higher
11093/// priority than right children.
11094///
11095/// When a particular child of a group is selected (whether provisionally during
11096/// a constraints aggregation attempt, or as a final selection), the
11097/// non-selection of other children of the group will "hide" any other groups
11098/// under those non-selected children.
11099///
11100/// Within a logical allocation, aggregation is attempted first by provisionally
11101/// selecting child 0 of the highest-priority group, and child 0 of the next
11102/// highest-priority group that isn't hidden by the provisional selections so
11103/// far, etc.
11104///
11105/// If that aggregation attempt fails, aggregation will be attempted with the
11106/// ordinal 0 child of all the same groups except the lowest priority non-hidden
11107/// group which will provisionally select its ordinal 1 child (and then child 2
11108/// and so on). If a new lowest-priority group is un-hidden as provisional
11109/// selections are updated, that newly un-hidden lowest-priority group has all
11110/// its children considered in order, before changing the provisional selection
11111/// in the former lowest-priority group. In terms of result, this is equivalent
11112/// to systematic enumeration of all possible combinations of choices in a
11113/// counting-like order updating the lowest-priority group the most often and
11114/// the highest-priority group the least often. Rather than actually attempting
11115/// aggregation with all the combinations, we can skip over combinations which
11116/// are redundant/equivalent due to hiding without any change to the result.
11117///
11118/// Attempted constraint aggregations of enumerated non-equivalent combinations
11119/// of choices continue in this manner until either (a) all aggregation attempts
11120/// fail in which case the overall logical allocation fails, or (b) until an
11121/// attempted aggregation succeeds, in which case buffer allocation (if needed;
11122/// if this is the pruned subtree rooted at the overall root `Node`) is
11123/// attempted once. If buffer allocation based on the first successful
11124/// constraints aggregation fails, the overall logical allocation fails (there
11125/// is no buffer allocation retry / re-attempt). If buffer allocation succeeds
11126/// (or is not needed due to being a pruned subtree that doesn't include the
11127/// root), the logical allocation succeeds.
11128///
11129/// If this prioritization scheme cannot reasonably work for your usage of
11130/// sysmem, please don't hesitate to contact sysmem folks to discuss potentially
11131/// adding a way to achieve what you need.
11132///
11133/// Please avoid creating a large number of `BufferCollectionTokenGroup`(s) per
11134/// logical allocation, especially with large number of children overall, and
11135/// especially in cases where aggregation may reasonably be expected to often
11136/// fail using ordinal 0 children and possibly with later children as well.
11137/// Sysmem mitigates potentially high time complexity of evaluating too many
11138/// child combinations/selections across too many groups by simply failing
11139/// logical allocation beyond a certain (fairly high, but not huge) max number
11140/// of considered group child combinations/selections. More advanced (and more
11141/// complicated) mitigation is not anticipated to be practically necessary or
11142/// worth the added complexity. Please contact sysmem folks if the max limit is
11143/// getting hit or if you anticipate it getting hit, to discuss potential
11144/// options.
11145///
11146/// Prefer to use multiple [`fuchsia.sysmem2/ImageFormatConstraints`] in a
11147/// single [`fuchsia.sysmem2/BufferCollectionConstraints`] when feasible (when a
11148/// participant just needs to express the ability to work with more than a
11149/// single [`fuchsia.images2/PixelFormat`], with sysmem choosing which
11150/// `PixelFormat` to use among those supported by all participants).
11151///
11152/// Similar to [`fuchsia.sysmem2/BufferCollectionToken`] and
11153/// [`fuchsia.sysmem2/BufferCollection`], closure of the
11154/// `BufferCollectionTokenGroup` channel without sending
11155/// [`fuchsia.sysmem2/Node.Release`] first will cause buffer collection failure
11156/// (or subtree failure if using
11157/// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11158/// [`fuchsia.sysmem2/BufferCollection.AttachToken`] and the
11159/// `BufferCollectionTokenGroup` is part of a subtree under such a node that
11160/// doesn't propagate failure to its parent).
11161///
11162/// Epitaphs are not used in this protocol.
11163#[derive(Debug)]
11164pub enum BufferCollectionTokenGroupRequest {
11165    /// Ensure that previous messages have been received server side. This is
11166    /// particularly useful after previous messages that created new tokens,
11167    /// because a token must be known to the sysmem server before sending the
11168    /// token to another participant.
11169    ///
11170    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
11171    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
11172    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
11173    /// to mitigate the possibility of a hostile/fake
11174    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
11175    /// Another way is to pass the token to
11176    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
11177    /// the token as part of exchanging it for a
11178    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
11179    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
11180    /// of stalling.
11181    ///
11182    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
11183    /// and then starting and completing a `Sync`, it's then safe to send the
11184    /// `BufferCollectionToken` client ends to other participants knowing the
11185    /// server will recognize the tokens when they're sent by the other
11186    /// participants to sysmem in a
11187    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
11188    /// efficient way to create tokens while avoiding unnecessary round trips.
11189    ///
11190    /// Other options include waiting for each
11191    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
11192    /// individually (using separate call to `Sync` after each), or calling
11193    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
11194    /// converted to a `BufferCollection` via
11195    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
11196    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
11197    /// the sync step and can create multiple tokens at once.
11198    Sync { responder: BufferCollectionTokenGroupSyncResponder },
11199    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
11200    ///
11201    /// Normally a participant will convert a `BufferCollectionToken` into a
11202    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
11203    /// `Release` via the token (and then close the channel immediately or
11204    /// shortly later in response to server closing the server end), which
11205    /// avoids causing buffer collection failure. Without a prior `Release`,
11206    /// closing the `BufferCollectionToken` client end will cause buffer
11207    /// collection failure.
11208    ///
11209    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
11210    ///
11211    /// By default the server handles unexpected closure of a
11212    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
11213    /// first) by failing the buffer collection. Partly this is to expedite
11214    /// closing VMO handles to reclaim memory when any participant fails. If a
11215    /// participant would like to cleanly close a `BufferCollection` without
11216    /// causing buffer collection failure, the participant can send `Release`
11217    /// before closing the `BufferCollection` client end. The `Release` can
11218    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
11219    /// buffer collection won't require constraints from this node in order to
11220    /// allocate. If after `SetConstraints`, the constraints are retained and
11221    /// aggregated, despite the lack of `BufferCollection` connection at the
11222    /// time of constraints aggregation.
11223    ///
11224    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
11225    ///
11226    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
11227    /// end (without `Release` first) will trigger failure of the buffer
11228    /// collection. To close a `BufferCollectionTokenGroup` channel without
11229    /// failing the buffer collection, ensure that AllChildrenPresent() has been
11230    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
11231    /// client end.
11232    ///
11233    /// If `Release` occurs before
11234    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
11235    /// buffer collection will fail (triggered by reception of `Release` without
11236    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
11237    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
11238    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
11239    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
11240    /// close requires `AllChildrenPresent` (if not already sent), then
11241    /// `Release`, then close client end.
11242    ///
11243    /// If `Release` occurs after `AllChildrenPresent`, the children and all
11244    /// their constraints remain intact (just as they would if the
11245    /// `BufferCollectionTokenGroup` channel had remained open), and the client
11246    /// end close doesn't trigger buffer collection failure.
11247    ///
11248    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
11249    ///
11250    /// For brevity, the per-channel-protocol paragraphs above ignore the
11251    /// separate failure domain created by
11252    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11253    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
11254    /// unexpectedly closes (without `Release` first) and that client end is
11255    /// under a failure domain, instead of failing the whole buffer collection,
11256    /// the failure domain is failed, but the buffer collection itself is
11257    /// isolated from failure of the failure domain. Such failure domains can be
11258    /// nested, in which case only the inner-most failure domain in which the
11259    /// `Node` resides fails.
11260    Release { control_handle: BufferCollectionTokenGroupControlHandle },
11261    /// Set a name for VMOs in this buffer collection.
11262    ///
11263    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
11264    /// will be truncated to fit. The name of the vmo will be suffixed with the
11265    /// buffer index within the collection (if the suffix fits within
11266    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
11267    /// listed in the inspect data.
11268    ///
11269    /// The name only affects VMOs allocated after the name is set; this call
11270    /// does not rename existing VMOs. If multiple clients set different names
11271    /// then the larger priority value will win. Setting a new name with the
11272    /// same priority as a prior name doesn't change the name.
11273    ///
11274    /// All table fields are currently required.
11275    ///
11276    /// + request `priority` The name is only set if this is the first `SetName`
11277    ///   or if `priority` is greater than any previous `priority` value in
11278    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
11279    /// + request `name` The name for VMOs created under this buffer collection.
11280    SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenGroupControlHandle },
11281    /// Set information about the current client that can be used by sysmem to
11282    /// help diagnose leaking memory and allocation stalls waiting for a
11283    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
11284    ///
11285    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
11286    /// `Node`(s) derived from this `Node`, unless overridden by
11287    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
11288    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
11289    ///
11290    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
11291    /// `Allocator` is the most efficient way to ensure that all
11292    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
11293    /// set, and is also more efficient than separately sending the same debug
11294    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
11295    /// created [`fuchsia.sysmem2/Node`].
11296    ///
11297    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
11298    /// indicate which client is closing their channel first, leading to subtree
11299    /// failure (which can be normal if the purpose of the subtree is over, but
11300    /// if happening earlier than expected, the client-channel-specific name can
11301    /// help diagnose where the failure is first coming from, from sysmem's
11302    /// point of view).
11303    ///
11304    /// All table fields are currently required.
11305    ///
11306    /// + request `name` This can be an arbitrary string, but the current
11307    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
11308    /// + request `id` This can be an arbitrary id, but the current process ID
11309    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
11310    SetDebugClientInfo {
11311        payload: NodeSetDebugClientInfoRequest,
11312        control_handle: BufferCollectionTokenGroupControlHandle,
11313    },
11314    /// Sysmem logs a warning if sysmem hasn't seen
11315    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
11316    /// within 5 seconds after creation of a new collection.
11317    ///
11318    /// Clients can call this method to change when the log is printed. If
11319    /// multiple clients set the deadline, it's unspecified which deadline will
11320    /// take effect.
11321    ///
11322    /// In most cases the default works well.
11323    ///
11324    /// All table fields are currently required.
11325    ///
11326    /// + request `deadline` The time at which sysmem will start trying to log
11327    ///   the warning, unless all constraints are with sysmem by then.
11328    SetDebugTimeoutLogDeadline {
11329        payload: NodeSetDebugTimeoutLogDeadlineRequest,
11330        control_handle: BufferCollectionTokenGroupControlHandle,
11331    },
11332    /// This enables verbose logging for the buffer collection.
11333    ///
11334    /// Verbose logging includes constraints set via
11335    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
11336    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
11337    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
11338    /// the tree of `Node`(s).
11339    ///
11340    /// Normally sysmem prints only a single line complaint when aggregation
11341    /// fails, with just the specific detailed reason that aggregation failed,
11342    /// with little surrounding context.  While this is often enough to diagnose
11343    /// a problem if only a small change was made and everything was working
11344    /// before the small change, it's often not particularly helpful for getting
11345    /// a new buffer collection to work for the first time.  Especially with
11346    /// more complex trees of nodes, involving things like
11347    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
11348    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
11349    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
11350    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
11351    /// looks like and why it's failing a logical allocation, or why a tree or
11352    /// subtree is failing sooner than expected.
11353    ///
11354    /// The intent of the extra logging is to be acceptable from a performance
11355    /// point of view, under the assumption that verbose logging is only enabled
11356    /// on a low number of buffer collections. If we're not tracking down a bug,
11357    /// we shouldn't send this message.
11358    SetVerboseLogging { control_handle: BufferCollectionTokenGroupControlHandle },
11359    /// This gets a handle that can be used as a parameter to
11360    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
11361    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
11362    /// client obtained this handle from this `Node`.
11363    ///
11364    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
11365    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
11366    /// despite the two calls typically being on different channels.
11367    ///
11368    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
11369    ///
11370    /// All table fields are currently required.
11371    ///
11372    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
11373    ///   different `Node` channel, to prove that the client obtained the handle
11374    ///   from this `Node`.
11375    GetNodeRef { responder: BufferCollectionTokenGroupGetNodeRefResponder },
11376    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
11377    /// rooted at a different child token of a common parent
11378    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
11379    /// passed-in `node_ref`.
11380    ///
11381    /// This call is for assisting with admission control de-duplication, and
11382    /// with debugging.
11383    ///
11384    /// The `node_ref` must be obtained using
11385    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
11386    ///
11387    /// The `node_ref` can be a duplicated handle; it's not necessary to call
11388    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
11389    ///
11390    /// If a calling token may not actually be a valid token at all due to a
11391    /// potentially hostile/untrusted provider of the token, call
11392    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
11393    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
11394    /// never responds due to a calling token not being a real token (not really
11395    /// talking to sysmem).  Another option is to call
11396    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
11397    /// which also validates the token along with converting it to a
11398    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
11399    ///
11400    /// All table fields are currently required.
11401    ///
11402    /// - response `is_alternate`
11403    ///   - true: The first parent node in common between the calling node and
11404    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
11405    ///     that the calling `Node` and the `node_ref` `Node` will not have both
11406    ///     their constraints apply - rather sysmem will choose one or the other
11407    ///     of the constraints - never both.  This is because only one child of
11408    ///     a `BufferCollectionTokenGroup` is selected during logical
11409    ///     allocation, with only that one child's subtree contributing to
11410    ///     constraints aggregation.
11411    ///   - false: The first parent node in common between the calling `Node`
11412    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
11413    ///     Currently, this means the first parent node in common is a
11414    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
11415    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
11416    ///     `Node` may have both their constraints apply during constraints
11417    ///     aggregation of the logical allocation, if both `Node`(s) are
11418    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
11419    ///     this case, there is no `BufferCollectionTokenGroup` that will
11420    ///     directly prevent the two `Node`(s) from both being selected and
11421    ///     their constraints both aggregated, but even when false, one or both
11422    ///     `Node`(s) may still be eliminated from consideration if one or both
11423    ///     `Node`(s) has a direct or indirect parent
11424    ///     `BufferCollectionTokenGroup` which selects a child subtree other
11425    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
11426    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
11427    ///   associated with the same buffer collection as the calling `Node`.
11428    ///   Another reason for this error is if the `node_ref` is an
11429    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
11430    ///   a real `node_ref` obtained from `GetNodeRef`.
11431    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
11433    ///   the needed rights expected on a real `node_ref`.
11434    /// * No other failing status codes are returned by this call.  However,
11435    ///   sysmem may add additional codes in future, so the client should have
11436    ///   sensible default handling for any failing status code.
11437    IsAlternateFor {
11438        payload: NodeIsAlternateForRequest,
11439        responder: BufferCollectionTokenGroupIsAlternateForResponder,
11440    },
11441    /// Get the buffer collection ID. This ID is also available from
11442    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
11443    /// within the collection).
11444    ///
11445    /// This call is mainly useful in situations where we can't convey a
11446    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
11447    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
11448    /// handle, which can be joined back up with a `BufferCollection` client end
11449    /// that was created via a different path. Prefer to convey a
11450    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
11451    ///
11452    /// Trusting a `buffer_collection_id` value from a source other than sysmem
11453    /// is analogous to trusting a koid value from a source other than zircon.
11454    /// Both should be avoided unless really necessary, and both require
11455    /// caution. In some situations it may be reasonable to refer to a
11456    /// pre-established `BufferCollection` by `buffer_collection_id` via a
11457    /// protocol for efficiency reasons, but an incoming value purporting to be
11458    /// a `buffer_collection_id` is not sufficient alone to justify granting the
11459    /// sender of the `buffer_collection_id` any capability. The sender must
11460    /// first prove to a receiver that the sender has/had a VMO or has/had a
11461    /// `BufferCollectionToken` to the same collection by sending a handle that
11462    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
11463    /// `buffer_collection_id` value. The receiver should take care to avoid
11464    /// assuming that a sender had a `BufferCollectionToken` in cases where the
11465    /// sender has only proven that the sender had a VMO.
11466    ///
11467    /// - response `buffer_collection_id` This ID is unique per buffer
11468    ///   collection per boot. Each buffer is uniquely identified by the
11469    ///   `buffer_collection_id` and `buffer_index` together.
11470    GetBufferCollectionId { responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder },
11471    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
11472    /// created after this message to weak, which means that a client's `Node`
11473    /// client end (or a child created after this message) is not alone
11474    /// sufficient to keep allocated VMOs alive.
11475    ///
11476    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
11477    /// `close_weak_asap`.
11478    ///
11479    /// This message is only permitted before the `Node` becomes ready for
11480    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
11481    ///   * `BufferCollectionToken`: any time
11482    ///   * `BufferCollection`: before `SetConstraints`
11483    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
11484    ///
11485    /// Currently, no conversion from strong `Node` to weak `Node` after ready
11486    /// for allocation is provided, but a client can simulate that by creating
11487    /// an additional `Node` before allocation and setting that additional
11488    /// `Node` to weak, and then potentially at some point later sending
11489    /// `Release` and closing the client end of the client's strong `Node`, but
11490    /// keeping the client's weak `Node`.
11491    ///
11492    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
11493    /// collection failure (all `Node` client end(s) will see
11494    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
11495    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
11496    /// this situation until all `Node`(s) are ready for allocation. For initial
11497    /// allocation to succeed, at least one strong `Node` is required to exist
11498    /// at allocation time, but after that client receives VMO handles, that
11499    /// client can `BufferCollection.Release` and close the client end without
11500    /// causing this type of failure.
11501    ///
11502    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
11503    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
11504    /// separately as appropriate.
11505    SetWeak { control_handle: BufferCollectionTokenGroupControlHandle },
11506    /// This indicates to sysmem that the client is prepared to pay attention to
11507    /// `close_weak_asap`.
11508    ///
11509    /// If sent, this message must be before
11510    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
11511    ///
11512    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
11513    /// send this message before `WaitForAllBuffersAllocated`, or a parent
11514    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
11515    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
11516    /// trigger buffer collection failure.
11517    ///
11518    /// This message is necessary because weak sysmem VMOs have not always been
11519    /// a thing, so older clients are not aware of the need to pay attention to
11520    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
11521    /// sysmem weak VMO handles asap. By having this message and requiring
11522    /// participants to indicate their acceptance of this aspect of the overall
11523    /// protocol, we avoid situations where an older client is delivered a weak
11524    /// VMO without any way for sysmem to get that VMO to close quickly later
11525    /// (and on a per-buffer basis).
11526    ///
11527    /// A participant that doesn't handle `close_weak_asap` and also doesn't
11528    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
11529    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
11530    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
11531    /// same participant has a child/delegate which does retrieve VMOs, that
11532    /// child/delegate will need to send `SetWeakOk` before
11533    /// `WaitForAllBuffersAllocated`.
11534    ///
11535    /// + request `for_child_nodes_also` If present and true, this means direct
11536    ///   child nodes of this node created after this message plus all
11537    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
11538    ///   those nodes. Any child node of this node that was created before this
11539    ///   message is not included. This setting is "sticky" in the sense that a
11540    ///   subsequent `SetWeakOk` without this bool set to true does not reset
11541    ///   the server-side bool. If this creates a problem for a participant, a
11542    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
11543    ///   tokens instead, as appropriate. A participant should only set
11544    ///   `for_child_nodes_also` true if the participant can really promise to
11545    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
11546    ///   weak VMO handles held by participants holding the corresponding child
11547    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
11548    ///   which are using sysmem(1) can be weak, despite the clients of those
11549    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
11550    ///   direct way to find out about `close_weak_asap`. This only applies to
11551    ///   descendents of this `Node` which are using sysmem(1), not to this
11552    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
11553    ///   token, which will fail allocation unless an ancestor of this `Node`
11554    ///   specified `for_child_nodes_also` true.
11555    SetWeakOk {
11556        payload: NodeSetWeakOkRequest,
11557        control_handle: BufferCollectionTokenGroupControlHandle,
11558    },
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
11561    /// reservation by a different `Node` via
11562    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
11563    ///
11564    /// The `Node` buffer counts may not be released until the entire tree of
11565    /// `Node`(s) is closed or failed, because
11566    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
11567    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
11568    /// `Node` buffer counts remain reserved until the orphaned node is later
11569    /// cleaned up.
11570    ///
11571    /// If the `Node` exceeds a fairly large number of attached eventpair server
11572    /// ends, a log message will indicate this and the `Node` (and the
11573    /// appropriate) sub-tree will fail.
11574    ///
11575    /// The `server_end` will remain open when
11576    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
11577    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
11578    /// [`fuchsia.sysmem2/BufferCollection`].
11579    ///
11580    /// This message can also be used with a
11581    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
11582    AttachNodeTracking {
11583        payload: NodeAttachNodeTrackingRequest,
11584        control_handle: BufferCollectionTokenGroupControlHandle,
11585    },
11586    /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
11587    /// (including its children) will be selected during allocation (or logical
11588    /// allocation).
11589    ///
11590    /// Before passing the client end of this token to
11591    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
11592    /// [`fuchsia.sysmem2/Node.Sync`] after
11593    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
11594    /// Or the client can use
11595    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
11596    /// essentially includes the `Sync`.
11597    ///
11598    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11599    /// fail the group's subtree and close the connection.
11600    ///
11601    /// After all children have been created, send AllChildrenPresent.
11602    ///
11603    /// + request `token_request` The server end of the new token channel.
11604    /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
11605    ///   token allows the holder to get the same rights to buffers as the
11606    ///   parent token (of the group) had. When the value isn't
    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
    ///   bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
11609    ///   for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
11610    ///   causes subtree failure.
11611    CreateChild {
11612        payload: BufferCollectionTokenGroupCreateChildRequest,
11613        control_handle: BufferCollectionTokenGroupControlHandle,
11614    },
11615    /// Create 1 or more child tokens at once, synchronously.  In contrast to
11616    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
11617    /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
11618    /// of a returned token to
11619    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
11620    ///
11621    /// The lower-index child tokens are higher priority (attempted sooner) than
11622    /// higher-index child tokens.
11623    ///
11624    /// As per all child tokens, successful aggregation will choose exactly one
11625    /// child among all created children (across all children created across
11626    /// potentially multiple calls to
11627    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
11628    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
11629    ///
11630    /// The maximum permissible total number of children per group, and total
11631    /// number of nodes in an overall tree (from the root) are capped to limits
11632    /// which are not configurable via these protocols.
11633    ///
11634    /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
11635    /// this will fail the group's subtree and close the connection.
11636    ///
11637    /// After all children have been created, send AllChildrenPresent.
11638    ///
11639    /// + request `rights_attentuation_masks` The size of the
11640    ///   `rights_attentuation_masks` determines the number of created child
11641    ///   tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
11642    ///   The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
11643    ///   other value, each 0 bit in the mask attenuates that right.
11644    /// - response `tokens` The created child tokens.
11645    CreateChildrenSync {
11646        payload: BufferCollectionTokenGroupCreateChildrenSyncRequest,
11647        responder: BufferCollectionTokenGroupCreateChildrenSyncResponder,
11648    },
11649    /// Indicate that no more children will be created.
11650    ///
11651    /// After creating all children, the client should send
11652    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
11653    /// inform sysmem that no more children will be created, so that sysmem can
11654    /// know when it's ok to start aggregating constraints.
11655    ///
11656    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11657    /// fail the group's subtree and close the connection.
11658    ///
11659    /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
11660    /// after `AllChildrenPresent`, else failure of the group's subtree will be
11661    /// triggered. This is intentionally not analogous to how `Release` without
11662    /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
11663    /// subtree failure.
11664    AllChildrenPresent { control_handle: BufferCollectionTokenGroupControlHandle },
11665    /// An interaction was received which does not match any known method.
11666    #[non_exhaustive]
11667    _UnknownMethod {
11668        /// Ordinal of the method that was called.
11669        ordinal: u64,
11670        control_handle: BufferCollectionTokenGroupControlHandle,
11671        method_type: fidl::MethodType,
11672    },
11673}
11674
11675impl BufferCollectionTokenGroupRequest {
11676    #[allow(irrefutable_let_patterns)]
11677    pub fn into_sync(self) -> Option<(BufferCollectionTokenGroupSyncResponder)> {
11678        if let BufferCollectionTokenGroupRequest::Sync { responder } = self {
11679            Some((responder))
11680        } else {
11681            None
11682        }
11683    }
11684
11685    #[allow(irrefutable_let_patterns)]
11686    pub fn into_release(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11687        if let BufferCollectionTokenGroupRequest::Release { control_handle } = self {
11688            Some((control_handle))
11689        } else {
11690            None
11691        }
11692    }
11693
11694    #[allow(irrefutable_let_patterns)]
11695    pub fn into_set_name(
11696        self,
11697    ) -> Option<(NodeSetNameRequest, BufferCollectionTokenGroupControlHandle)> {
11698        if let BufferCollectionTokenGroupRequest::SetName { payload, control_handle } = self {
11699            Some((payload, control_handle))
11700        } else {
11701            None
11702        }
11703    }
11704
11705    #[allow(irrefutable_let_patterns)]
11706    pub fn into_set_debug_client_info(
11707        self,
11708    ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenGroupControlHandle)> {
11709        if let BufferCollectionTokenGroupRequest::SetDebugClientInfo { payload, control_handle } =
11710            self
11711        {
11712            Some((payload, control_handle))
11713        } else {
11714            None
11715        }
11716    }
11717
11718    #[allow(irrefutable_let_patterns)]
11719    pub fn into_set_debug_timeout_log_deadline(
11720        self,
11721    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenGroupControlHandle)>
11722    {
11723        if let BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {
11724            payload,
11725            control_handle,
11726        } = self
11727        {
11728            Some((payload, control_handle))
11729        } else {
11730            None
11731        }
11732    }
11733
11734    #[allow(irrefutable_let_patterns)]
11735    pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11736        if let BufferCollectionTokenGroupRequest::SetVerboseLogging { control_handle } = self {
11737            Some((control_handle))
11738        } else {
11739            None
11740        }
11741    }
11742
11743    #[allow(irrefutable_let_patterns)]
11744    pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGroupGetNodeRefResponder)> {
11745        if let BufferCollectionTokenGroupRequest::GetNodeRef { responder } = self {
11746            Some((responder))
11747        } else {
11748            None
11749        }
11750    }
11751
11752    #[allow(irrefutable_let_patterns)]
11753    pub fn into_is_alternate_for(
11754        self,
11755    ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenGroupIsAlternateForResponder)>
11756    {
11757        if let BufferCollectionTokenGroupRequest::IsAlternateFor { payload, responder } = self {
11758            Some((payload, responder))
11759        } else {
11760            None
11761        }
11762    }
11763
11764    #[allow(irrefutable_let_patterns)]
11765    pub fn into_get_buffer_collection_id(
11766        self,
11767    ) -> Option<(BufferCollectionTokenGroupGetBufferCollectionIdResponder)> {
11768        if let BufferCollectionTokenGroupRequest::GetBufferCollectionId { responder } = self {
11769            Some((responder))
11770        } else {
11771            None
11772        }
11773    }
11774
11775    #[allow(irrefutable_let_patterns)]
11776    pub fn into_set_weak(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11777        if let BufferCollectionTokenGroupRequest::SetWeak { control_handle } = self {
11778            Some((control_handle))
11779        } else {
11780            None
11781        }
11782    }
11783
11784    #[allow(irrefutable_let_patterns)]
11785    pub fn into_set_weak_ok(
11786        self,
11787    ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenGroupControlHandle)> {
11788        if let BufferCollectionTokenGroupRequest::SetWeakOk { payload, control_handle } = self {
11789            Some((payload, control_handle))
11790        } else {
11791            None
11792        }
11793    }
11794
11795    #[allow(irrefutable_let_patterns)]
11796    pub fn into_attach_node_tracking(
11797        self,
11798    ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenGroupControlHandle)> {
11799        if let BufferCollectionTokenGroupRequest::AttachNodeTracking { payload, control_handle } =
11800            self
11801        {
11802            Some((payload, control_handle))
11803        } else {
11804            None
11805        }
11806    }
11807
11808    #[allow(irrefutable_let_patterns)]
11809    pub fn into_create_child(
11810        self,
11811    ) -> Option<(
11812        BufferCollectionTokenGroupCreateChildRequest,
11813        BufferCollectionTokenGroupControlHandle,
11814    )> {
11815        if let BufferCollectionTokenGroupRequest::CreateChild { payload, control_handle } = self {
11816            Some((payload, control_handle))
11817        } else {
11818            None
11819        }
11820    }
11821
11822    #[allow(irrefutable_let_patterns)]
11823    pub fn into_create_children_sync(
11824        self,
11825    ) -> Option<(
11826        BufferCollectionTokenGroupCreateChildrenSyncRequest,
11827        BufferCollectionTokenGroupCreateChildrenSyncResponder,
11828    )> {
11829        if let BufferCollectionTokenGroupRequest::CreateChildrenSync { payload, responder } = self {
11830            Some((payload, responder))
11831        } else {
11832            None
11833        }
11834    }
11835
11836    #[allow(irrefutable_let_patterns)]
11837    pub fn into_all_children_present(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11838        if let BufferCollectionTokenGroupRequest::AllChildrenPresent { control_handle } = self {
11839            Some((control_handle))
11840        } else {
11841            None
11842        }
11843    }
11844
11845    /// Name of the method defined in FIDL
11846    pub fn method_name(&self) -> &'static str {
11847        match *self {
11848            BufferCollectionTokenGroupRequest::Sync { .. } => "sync",
11849            BufferCollectionTokenGroupRequest::Release { .. } => "release",
11850            BufferCollectionTokenGroupRequest::SetName { .. } => "set_name",
11851            BufferCollectionTokenGroupRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
11852            BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline { .. } => {
11853                "set_debug_timeout_log_deadline"
11854            }
11855            BufferCollectionTokenGroupRequest::SetVerboseLogging { .. } => "set_verbose_logging",
11856            BufferCollectionTokenGroupRequest::GetNodeRef { .. } => "get_node_ref",
11857            BufferCollectionTokenGroupRequest::IsAlternateFor { .. } => "is_alternate_for",
11858            BufferCollectionTokenGroupRequest::GetBufferCollectionId { .. } => {
11859                "get_buffer_collection_id"
11860            }
11861            BufferCollectionTokenGroupRequest::SetWeak { .. } => "set_weak",
11862            BufferCollectionTokenGroupRequest::SetWeakOk { .. } => "set_weak_ok",
11863            BufferCollectionTokenGroupRequest::AttachNodeTracking { .. } => "attach_node_tracking",
11864            BufferCollectionTokenGroupRequest::CreateChild { .. } => "create_child",
11865            BufferCollectionTokenGroupRequest::CreateChildrenSync { .. } => "create_children_sync",
11866            BufferCollectionTokenGroupRequest::AllChildrenPresent { .. } => "all_children_present",
11867            BufferCollectionTokenGroupRequest::_UnknownMethod {
11868                method_type: fidl::MethodType::OneWay,
11869                ..
11870            } => "unknown one-way method",
11871            BufferCollectionTokenGroupRequest::_UnknownMethod {
11872                method_type: fidl::MethodType::TwoWay,
11873                ..
11874            } => "unknown two-way method",
11875        }
11876    }
11877}
11878
/// Server-side control handle for the `BufferCollectionTokenGroup` protocol.
/// Cloning is cheap: clones share the same underlying connection state via `Arc`.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenGroupControlHandle {
    // Shared serving state; all trait methods on this handle delegate to it.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
11883
// All `ControlHandle` operations forward to the shared `ServeInner`.
impl fidl::endpoints::ControlHandle for BufferCollectionTokenGroupControlHandle {
    // Shut down the server end of the connection.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    // Shut down, delivering `status` to the client as an epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    // True once the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    // Signals future that resolves when the underlying channel closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Raise/clear user signals on the peer endpoint of the channel
    // (zircon channels only, hence the fuchsia-only cfg).
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
11909
11910impl BufferCollectionTokenGroupControlHandle {}
11911
/// Responder used to reply to a `BufferCollectionTokenGroup.Sync` request.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupSyncResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // triggering this responder's shutdown-on-drop behavior.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    // Transaction id of the pending request, echoed back in the response.
    tx_id: u32,
}
11918
/// Sets the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
11929
11930impl fidl::endpoints::Responder for BufferCollectionTokenGroupSyncResponder {
11931    type ControlHandle = BufferCollectionTokenGroupControlHandle;
11932
11933    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
11934        &self.control_handle
11935    }
11936
11937    fn drop_without_shutdown(mut self) {
11938        // Safety: drops once, never accessed again due to mem::forget
11939        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
11940        // Prevent Drop from running (which would shut down the channel)
11941        std::mem::forget(self);
11942    }
11943}
11944
11945impl BufferCollectionTokenGroupSyncResponder {
11946    /// Sends a response to the FIDL transaction.
11947    ///
11948    /// Sets the channel to shutdown if an error occurs.
11949    pub fn send(self) -> Result<(), fidl::Error> {
11950        let _result = self.send_raw();
11951        if _result.is_err() {
11952            self.control_handle.shutdown();
11953        }
11954        self.drop_without_shutdown();
11955        _result
11956    }
11957
11958    /// Similar to "send" but does not shutdown the channel if an error occurs.
11959    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
11960        let _result = self.send_raw();
11961        self.drop_without_shutdown();
11962        _result
11963    }
11964
11965    fn send_raw(&self) -> Result<(), fidl::Error> {
11966        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
11967            fidl::encoding::Flexible::new(()),
11968            self.tx_id,
11969            0x11ac2555cf575b54,
11970            fidl::encoding::DynamicFlags::FLEXIBLE,
11971        )
11972    }
11973}
11974
11975#[must_use = "FIDL methods require a response to be sent"]
11976#[derive(Debug)]
11977pub struct BufferCollectionTokenGroupGetNodeRefResponder {
11978    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
11979    tx_id: u32,
11980}
11981
11982/// Set the the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
11983/// if the responder is dropped without sending a response, so that the client
11984/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
11985impl std::ops::Drop for BufferCollectionTokenGroupGetNodeRefResponder {
11986    fn drop(&mut self) {
11987        self.control_handle.shutdown();
11988        // Safety: drops once, never accessed again
11989        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
11990    }
11991}
11992
11993impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetNodeRefResponder {
11994    type ControlHandle = BufferCollectionTokenGroupControlHandle;
11995
11996    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
11997        &self.control_handle
11998    }
11999
12000    fn drop_without_shutdown(mut self) {
12001        // Safety: drops once, never accessed again due to mem::forget
12002        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
12003        // Prevent Drop from running (which would shut down the channel)
12004        std::mem::forget(self);
12005    }
12006}
12007
12008impl BufferCollectionTokenGroupGetNodeRefResponder {
12009    /// Sends a response to the FIDL transaction.
12010    ///
12011    /// Sets the channel to shutdown if an error occurs.
12012    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
12013        let _result = self.send_raw(payload);
12014        if _result.is_err() {
12015            self.control_handle.shutdown();
12016        }
12017        self.drop_without_shutdown();
12018        _result
12019    }
12020
12021    /// Similar to "send" but does not shutdown the channel if an error occurs.
12022    pub fn send_no_shutdown_on_err(
12023        self,
12024        mut payload: NodeGetNodeRefResponse,
12025    ) -> Result<(), fidl::Error> {
12026        let _result = self.send_raw(payload);
12027        self.drop_without_shutdown();
12028        _result
12029    }
12030
12031    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
12032        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
12033            fidl::encoding::Flexible::new(&mut payload),
12034            self.tx_id,
12035            0x5b3d0e51614df053,
12036            fidl::encoding::DynamicFlags::FLEXIBLE,
12037        )
12038    }
12039}
12040
12041#[must_use = "FIDL methods require a response to be sent"]
12042#[derive(Debug)]
12043pub struct BufferCollectionTokenGroupIsAlternateForResponder {
12044    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
12045    tx_id: u32,
12046}
12047
12048/// Set the the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
12049/// if the responder is dropped without sending a response, so that the client
12050/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
12051impl std::ops::Drop for BufferCollectionTokenGroupIsAlternateForResponder {
12052    fn drop(&mut self) {
12053        self.control_handle.shutdown();
12054        // Safety: drops once, never accessed again
12055        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
12056    }
12057}
12058
12059impl fidl::endpoints::Responder for BufferCollectionTokenGroupIsAlternateForResponder {
12060    type ControlHandle = BufferCollectionTokenGroupControlHandle;
12061
12062    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
12063        &self.control_handle
12064    }
12065
12066    fn drop_without_shutdown(mut self) {
12067        // Safety: drops once, never accessed again due to mem::forget
12068        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
12069        // Prevent Drop from running (which would shut down the channel)
12070        std::mem::forget(self);
12071    }
12072}
12073
12074impl BufferCollectionTokenGroupIsAlternateForResponder {
12075    /// Sends a response to the FIDL transaction.
12076    ///
12077    /// Sets the channel to shutdown if an error occurs.
12078    pub fn send(
12079        self,
12080        mut result: Result<&NodeIsAlternateForResponse, Error>,
12081    ) -> Result<(), fidl::Error> {
12082        let _result = self.send_raw(result);
12083        if _result.is_err() {
12084            self.control_handle.shutdown();
12085        }
12086        self.drop_without_shutdown();
12087        _result
12088    }
12089
12090    /// Similar to "send" but does not shutdown the channel if an error occurs.
12091    pub fn send_no_shutdown_on_err(
12092        self,
12093        mut result: Result<&NodeIsAlternateForResponse, Error>,
12094    ) -> Result<(), fidl::Error> {
12095        let _result = self.send_raw(result);
12096        self.drop_without_shutdown();
12097        _result
12098    }
12099
12100    fn send_raw(
12101        &self,
12102        mut result: Result<&NodeIsAlternateForResponse, Error>,
12103    ) -> Result<(), fidl::Error> {
12104        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
12105            NodeIsAlternateForResponse,
12106            Error,
12107        >>(
12108            fidl::encoding::FlexibleResult::new(result),
12109            self.tx_id,
12110            0x3a58e00157e0825,
12111            fidl::encoding::DynamicFlags::FLEXIBLE,
12112        )
12113    }
12114}
12115
12116#[must_use = "FIDL methods require a response to be sent"]
12117#[derive(Debug)]
12118pub struct BufferCollectionTokenGroupGetBufferCollectionIdResponder {
12119    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
12120    tx_id: u32,
12121}
12122
12123/// Set the the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
12124/// if the responder is dropped without sending a response, so that the client
12125/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
12126impl std::ops::Drop for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
12127    fn drop(&mut self) {
12128        self.control_handle.shutdown();
12129        // Safety: drops once, never accessed again
12130        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
12131    }
12132}
12133
12134impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
12135    type ControlHandle = BufferCollectionTokenGroupControlHandle;
12136
12137    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
12138        &self.control_handle
12139    }
12140
12141    fn drop_without_shutdown(mut self) {
12142        // Safety: drops once, never accessed again due to mem::forget
12143        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
12144        // Prevent Drop from running (which would shut down the channel)
12145        std::mem::forget(self);
12146    }
12147}
12148
12149impl BufferCollectionTokenGroupGetBufferCollectionIdResponder {
12150    /// Sends a response to the FIDL transaction.
12151    ///
12152    /// Sets the channel to shutdown if an error occurs.
12153    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
12154        let _result = self.send_raw(payload);
12155        if _result.is_err() {
12156            self.control_handle.shutdown();
12157        }
12158        self.drop_without_shutdown();
12159        _result
12160    }
12161
12162    /// Similar to "send" but does not shutdown the channel if an error occurs.
12163    pub fn send_no_shutdown_on_err(
12164        self,
12165        mut payload: &NodeGetBufferCollectionIdResponse,
12166    ) -> Result<(), fidl::Error> {
12167        let _result = self.send_raw(payload);
12168        self.drop_without_shutdown();
12169        _result
12170    }
12171
12172    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
12173        self.control_handle
12174            .inner
12175            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
12176                fidl::encoding::Flexible::new(payload),
12177                self.tx_id,
12178                0x77d19a494b78ba8c,
12179                fidl::encoding::DynamicFlags::FLEXIBLE,
12180            )
12181    }
12182}
12183
12184#[must_use = "FIDL methods require a response to be sent"]
12185#[derive(Debug)]
12186pub struct BufferCollectionTokenGroupCreateChildrenSyncResponder {
12187    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
12188    tx_id: u32,
12189}
12190
12191/// Set the the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
12192/// if the responder is dropped without sending a response, so that the client
12193/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
12194impl std::ops::Drop for BufferCollectionTokenGroupCreateChildrenSyncResponder {
12195    fn drop(&mut self) {
12196        self.control_handle.shutdown();
12197        // Safety: drops once, never accessed again
12198        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
12199    }
12200}
12201
12202impl fidl::endpoints::Responder for BufferCollectionTokenGroupCreateChildrenSyncResponder {
12203    type ControlHandle = BufferCollectionTokenGroupControlHandle;
12204
12205    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
12206        &self.control_handle
12207    }
12208
12209    fn drop_without_shutdown(mut self) {
12210        // Safety: drops once, never accessed again due to mem::forget
12211        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
12212        // Prevent Drop from running (which would shut down the channel)
12213        std::mem::forget(self);
12214    }
12215}
12216
12217impl BufferCollectionTokenGroupCreateChildrenSyncResponder {
12218    /// Sends a response to the FIDL transaction.
12219    ///
12220    /// Sets the channel to shutdown if an error occurs.
12221    pub fn send(
12222        self,
12223        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
12224    ) -> Result<(), fidl::Error> {
12225        let _result = self.send_raw(payload);
12226        if _result.is_err() {
12227            self.control_handle.shutdown();
12228        }
12229        self.drop_without_shutdown();
12230        _result
12231    }
12232
12233    /// Similar to "send" but does not shutdown the channel if an error occurs.
12234    pub fn send_no_shutdown_on_err(
12235        self,
12236        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
12237    ) -> Result<(), fidl::Error> {
12238        let _result = self.send_raw(payload);
12239        self.drop_without_shutdown();
12240        _result
12241    }
12242
12243    fn send_raw(
12244        &self,
12245        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
12246    ) -> Result<(), fidl::Error> {
12247        self.control_handle.inner.send::<fidl::encoding::FlexibleType<
12248            BufferCollectionTokenGroupCreateChildrenSyncResponse,
12249        >>(
12250            fidl::encoding::Flexible::new(&mut payload),
12251            self.tx_id,
12252            0x15dea448c536070a,
12253            fidl::encoding::DynamicFlags::FLEXIBLE,
12254        )
12255    }
12256}
12257
/// Marker type for the `Node` protocol; ties together the proxy,
/// request-stream, and (on Fuchsia) synchronous-proxy types.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct NodeMarker;

impl fidl::endpoints::ProtocolMarker for NodeMarker {
    type Proxy = NodeProxy;
    type RequestStream = NodeRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = NodeSynchronousProxy;

    // Name used in debug/error messages. NOTE(review): "(anonymous)" —
    // presumably the protocol is not discoverable; confirm in the FIDL library.
    const DEBUG_NAME: &'static str = "(anonymous) Node";
}
/// Result of [`Node.IsAlternateFor`]: the response table on success, or a
/// sysmem2 domain [`Error`] on failure.
pub type NodeIsAlternateForResult = Result<NodeIsAlternateForResponse, Error>;
12270
/// Abstract client interface for the `Node` protocol.
///
/// One-way methods return `Result<(), fidl::Error>` directly; two-way methods
/// return an associated future type that resolves with the decoded response.
pub trait NodeProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client for the `Node` protocol. Fuchsia-only.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct NodeSynchronousProxy {
    // Underlying synchronous FIDL client; all calls delegate to it.
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for NodeSynchronousProxy {
    type Proxy = NodeProxy;
    type Protocol = NodeMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
12327
12328#[cfg(target_os = "fuchsia")]
12329impl NodeSynchronousProxy {
12330    pub fn new(channel: fidl::Channel) -> Self {
12331        let protocol_name = <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
12332        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
12333    }
12334
    /// Consumes the proxy and returns the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }
12338
    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    ///
    /// Propagates any `fidl::Error` from waiting on the channel or from
    /// decoding the received event.
    pub fn wait_for_event(&self, deadline: zx::MonotonicInstant) -> Result<NodeEvent, fidl::Error> {
        NodeEvent::decode(self.client.wait_for_event(deadline)?)
    }
12344
12345    /// Ensure that previous messages have been received server side. This is
12346    /// particularly useful after previous messages that created new tokens,
12347    /// because a token must be known to the sysmem server before sending the
12348    /// token to another participant.
12349    ///
12350    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12351    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12352    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12353    /// to mitigate the possibility of a hostile/fake
12354    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12355    /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
12357    /// the token as part of exchanging it for a
12358    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12359    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12360    /// of stalling.
12361    ///
12362    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12363    /// and then starting and completing a `Sync`, it's then safe to send the
12364    /// `BufferCollectionToken` client ends to other participants knowing the
12365    /// server will recognize the tokens when they're sent by the other
12366    /// participants to sysmem in a
12367    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
12368    /// efficient way to create tokens while avoiding unnecessary round trips.
12369    ///
12370    /// Other options include waiting for each
12371    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
12372    /// individually (using separate call to `Sync` after each), or calling
12373    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
12374    /// converted to a `BufferCollection` via
12375    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
12376    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
12377    /// the sync step and can create multiple tokens at once.
    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
        // Two-way call with empty request and response payloads; blocks until
        // the reply arrives or `___deadline` passes.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
        >(
            (),
            0x11ac2555cf575b54, // ordinal for `Sync`
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<NodeMarker>("sync")?;
        Ok(_response)
    }
12391
12392    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
12393    ///
12394    /// Normally a participant will convert a `BufferCollectionToken` into a
12395    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
12396    /// `Release` via the token (and then close the channel immediately or
12397    /// shortly later in response to server closing the server end), which
12398    /// avoids causing buffer collection failure. Without a prior `Release`,
12399    /// closing the `BufferCollectionToken` client end will cause buffer
12400    /// collection failure.
12401    ///
12402    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
12403    ///
12404    /// By default the server handles unexpected closure of a
12405    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
12406    /// first) by failing the buffer collection. Partly this is to expedite
12407    /// closing VMO handles to reclaim memory when any participant fails. If a
12408    /// participant would like to cleanly close a `BufferCollection` without
12409    /// causing buffer collection failure, the participant can send `Release`
12410    /// before closing the `BufferCollection` client end. The `Release` can
12411    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
12412    /// buffer collection won't require constraints from this node in order to
12413    /// allocate. If after `SetConstraints`, the constraints are retained and
12414    /// aggregated, despite the lack of `BufferCollection` connection at the
12415    /// time of constraints aggregation.
12416    ///
12417    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
12418    ///
12419    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
12420    /// end (without `Release` first) will trigger failure of the buffer
12421    /// collection. To close a `BufferCollectionTokenGroup` channel without
12422    /// failing the buffer collection, ensure that AllChildrenPresent() has been
12423    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
12424    /// client end.
12425    ///
12426    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
12428    /// buffer collection will fail (triggered by reception of `Release` without
12429    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
12430    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
12431    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
12432    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
12433    /// close requires `AllChildrenPresent` (if not already sent), then
12434    /// `Release`, then close client end.
12435    ///
12436    /// If `Release` occurs after `AllChildrenPresent`, the children and all
12437    /// their constraints remain intact (just as they would if the
12438    /// `BufferCollectionTokenGroup` channel had remained open), and the client
12439    /// end close doesn't trigger buffer collection failure.
12440    ///
12441    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
12442    ///
12443    /// For brevity, the per-channel-protocol paragraphs above ignore the
12444    /// separate failure domain created by
12445    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
12446    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
12447    /// unexpectedly closes (without `Release` first) and that client end is
12448    /// under a failure domain, instead of failing the whole buffer collection,
12449    /// the failure domain is failed, but the buffer collection itself is
12450    /// isolated from failure of the failure domain. Such failure domains can be
12451    /// nested, in which case only the inner-most failure domain in which the
12452    /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) message: no reply is awaited.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6, // ordinal for `Release`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12460
12461    /// Set a name for VMOs in this buffer collection.
12462    ///
12463    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
12464    /// will be truncated to fit. The name of the vmo will be suffixed with the
12465    /// buffer index within the collection (if the suffix fits within
12466    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
12467    /// listed in the inspect data.
12468    ///
12469    /// The name only affects VMOs allocated after the name is set; this call
12470    /// does not rename existing VMOs. If multiple clients set different names
12471    /// then the larger priority value will win. Setting a new name with the
12472    /// same priority as a prior name doesn't change the name.
12473    ///
12474    /// All table fields are currently required.
12475    ///
12476    /// + request `priority` The name is only set if this is the first `SetName`
12477    ///   or if `priority` is greater than any previous `priority` value in
12478    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
12479    /// + request `name` The name for VMOs created under this buffer collection.
    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        // One-way message carrying the name/priority table; no reply awaited.
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9, // ordinal for `SetName`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12487
12488    /// Set information about the current client that can be used by sysmem to
12489    /// help diagnose leaking memory and allocation stalls waiting for a
12490    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
12491    ///
12492    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
12494    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
12495    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
12496    ///
12497    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
12498    /// `Allocator` is the most efficient way to ensure that all
12499    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
12500    /// set, and is also more efficient than separately sending the same debug
12501    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
12502    /// created [`fuchsia.sysmem2/Node`].
12503    ///
12504    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
12505    /// indicate which client is closing their channel first, leading to subtree
12506    /// failure (which can be normal if the purpose of the subtree is over, but
12507    /// if happening earlier than expected, the client-channel-specific name can
12508    /// help diagnose where the failure is first coming from, from sysmem's
12509    /// point of view).
12510    ///
12511    /// All table fields are currently required.
12512    ///
12513    /// + request `name` This can be an arbitrary string, but the current
12514    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
12515    /// + request `id` This can be an arbitrary id, but the current process ID
12516    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; no reply awaited.
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1, // ordinal for `SetDebugClientInfo`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12527
12528    /// Sysmem logs a warning if sysmem hasn't seen
12529    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
12530    /// within 5 seconds after creation of a new collection.
12531    ///
12532    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
12534    /// take effect.
12535    ///
12536    /// In most cases the default works well.
12537    ///
12538    /// All table fields are currently required.
12539    ///
12540    /// + request `deadline` The time at which sysmem will start trying to log
12541    ///   the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; no reply awaited.
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806, // ordinal for `SetDebugTimeoutLogDeadline`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12552
12553    /// This enables verbose logging for the buffer collection.
12554    ///
12555    /// Verbose logging includes constraints set via
12556    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
12557    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
12558    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
12559    /// the tree of `Node`(s).
12560    ///
12561    /// Normally sysmem prints only a single line complaint when aggregation
12562    /// fails, with just the specific detailed reason that aggregation failed,
12563    /// with little surrounding context.  While this is often enough to diagnose
12564    /// a problem if only a small change was made and everything was working
12565    /// before the small change, it's often not particularly helpful for getting
12566    /// a new buffer collection to work for the first time.  Especially with
12567    /// more complex trees of nodes, involving things like
12568    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
12569    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
12570    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
12571    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
12572    /// looks like and why it's failing a logical allocation, or why a tree or
12573    /// subtree is failing sooner than expected.
12574    ///
12575    /// The intent of the extra logging is to be acceptable from a performance
12576    /// point of view, under the assumption that verbose logging is only enabled
12577    /// on a low number of buffer collections. If we're not tracking down a bug,
12578    /// we shouldn't send this message.
    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        // One-way message with an empty payload; no reply awaited.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad, // ordinal for `SetVerboseLogging`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12586
12587    /// This gets a handle that can be used as a parameter to
12588    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
12589    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
12590    /// client obtained this handle from this `Node`.
12591    ///
12592    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
12593    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
12594    /// despite the two calls typically being on different channels.
12595    ///
12596    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
12597    ///
12598    /// All table fields are currently required.
12599    ///
12600    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
12601    ///   different `Node` channel, to prove that the client obtained the handle
12602    ///   from this `Node`.
    pub fn r#get_node_ref(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
        // Blocking two-way call: sends an empty request and waits (up to
        // `___deadline`) for the reply, decoded as a flexible envelope.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
        >(
            (),
            0x5b3d0e51614df053, // method ordinal for GetNodeRef
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        // Unwraps the flexible envelope, converting an unknown-method reply
        // into a fidl::Error for this protocol.
        .into_result::<NodeMarker>("get_node_ref")?;
        Ok(_response)
    }
12619
12620    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
12621    /// rooted at a different child token of a common parent
12622    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
12623    /// passed-in `node_ref`.
12624    ///
12625    /// This call is for assisting with admission control de-duplication, and
12626    /// with debugging.
12627    ///
12628    /// The `node_ref` must be obtained using
12629    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
12630    ///
12631    /// The `node_ref` can be a duplicated handle; it's not necessary to call
12632    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
12633    ///
12634    /// If a calling token may not actually be a valid token at all due to a
12635    /// potentially hostile/untrusted provider of the token, call
12636    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
12637    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
12638    /// never responds due to a calling token not being a real token (not really
12639    /// talking to sysmem).  Another option is to call
12640    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
12641    /// which also validates the token along with converting it to a
12642    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
12643    ///
12644    /// All table fields are currently required.
12645    ///
12646    /// - response `is_alternate`
12647    ///   - true: The first parent node in common between the calling node and
12648    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
12649    ///     that the calling `Node` and the `node_ref` `Node` will not have both
12650    ///     their constraints apply - rather sysmem will choose one or the other
12651    ///     of the constraints - never both.  This is because only one child of
12652    ///     a `BufferCollectionTokenGroup` is selected during logical
12653    ///     allocation, with only that one child's subtree contributing to
12654    ///     constraints aggregation.
12655    ///   - false: The first parent node in common between the calling `Node`
12656    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
12657    ///     Currently, this means the first parent node in common is a
12658    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
12659    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
12660    ///     `Node` may have both their constraints apply during constraints
12661    ///     aggregation of the logical allocation, if both `Node`(s) are
12662    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
12663    ///     this case, there is no `BufferCollectionTokenGroup` that will
12664    ///     directly prevent the two `Node`(s) from both being selected and
12665    ///     their constraints both aggregated, but even when false, one or both
12666    ///     `Node`(s) may still be eliminated from consideration if one or both
12667    ///     `Node`(s) has a direct or indirect parent
12668    ///     `BufferCollectionTokenGroup` which selects a child subtree other
12669    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
12670    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
12671    ///   associated with the same buffer collection as the calling `Node`.
12672    ///   Another reason for this error is if the `node_ref` is an
12673    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
12674    ///   a real `node_ref` obtained from `GetNodeRef`.
12675    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
12677    ///   the needed rights expected on a real `node_ref`.
12678    /// * No other failing status codes are returned by this call.  However,
12679    ///   sysmem may add additional codes in future, so the client should have
12680    ///   sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
        // Blocking two-way call whose reply may carry a domain `Error`, so the
        // response is decoded as a flexible *result* envelope.
        let _response = self.client.send_query::<
            NodeIsAlternateForRequest,
            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
        >(
            &mut payload,
            0x3a58e00157e0825, // method ordinal for IsAlternateFor
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<NodeMarker>("is_alternate_for")?;
        // `.map(|x| x)` is a generated identity conversion of the Ok value.
        Ok(_response.map(|x| x))
    }
12698
12699    /// Get the buffer collection ID. This ID is also available from
12700    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
12701    /// within the collection).
12702    ///
12703    /// This call is mainly useful in situations where we can't convey a
12704    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
12705    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
12706    /// handle, which can be joined back up with a `BufferCollection` client end
12707    /// that was created via a different path. Prefer to convey a
12708    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
12709    ///
12710    /// Trusting a `buffer_collection_id` value from a source other than sysmem
12711    /// is analogous to trusting a koid value from a source other than zircon.
12712    /// Both should be avoided unless really necessary, and both require
12713    /// caution. In some situations it may be reasonable to refer to a
12714    /// pre-established `BufferCollection` by `buffer_collection_id` via a
12715    /// protocol for efficiency reasons, but an incoming value purporting to be
12716    /// a `buffer_collection_id` is not sufficient alone to justify granting the
12717    /// sender of the `buffer_collection_id` any capability. The sender must
12718    /// first prove to a receiver that the sender has/had a VMO or has/had a
12719    /// `BufferCollectionToken` to the same collection by sending a handle that
12720    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
12721    /// `buffer_collection_id` value. The receiver should take care to avoid
12722    /// assuming that a sender had a `BufferCollectionToken` in cases where the
12723    /// sender has only proven that the sender had a VMO.
12724    ///
12725    /// - response `buffer_collection_id` This ID is unique per buffer
12726    ///   collection per boot. Each buffer is uniquely identified by the
12727    ///   `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
        // Blocking two-way call: empty request, reply decoded as a flexible
        // envelope; waits at most until `___deadline`.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
        >(
            (),
            0x77d19a494b78ba8c, // method ordinal for GetBufferCollectionId
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<NodeMarker>("get_buffer_collection_id")?;
        Ok(_response)
    }
12744
12745    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
12746    /// created after this message to weak, which means that a client's `Node`
12747    /// client end (or a child created after this message) is not alone
12748    /// sufficient to keep allocated VMOs alive.
12749    ///
12750    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
12751    /// `close_weak_asap`.
12752    ///
12753    /// This message is only permitted before the `Node` becomes ready for
12754    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
12755    ///   * `BufferCollectionToken`: any time
12756    ///   * `BufferCollection`: before `SetConstraints`
12757    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
12758    ///
12759    /// Currently, no conversion from strong `Node` to weak `Node` after ready
12760    /// for allocation is provided, but a client can simulate that by creating
12761    /// an additional `Node` before allocation and setting that additional
12762    /// `Node` to weak, and then potentially at some point later sending
12763    /// `Release` and closing the client end of the client's strong `Node`, but
12764    /// keeping the client's weak `Node`.
12765    ///
12766    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
12767    /// collection failure (all `Node` client end(s) will see
12768    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
12769    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
12770    /// this situation until all `Node`(s) are ready for allocation. For initial
12771    /// allocation to succeed, at least one strong `Node` is required to exist
12772    /// at allocation time, but after that client receives VMO handles, that
12773    /// client can `BufferCollection.Release` and close the client end without
12774    /// causing this type of failure.
12775    ///
12776    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
12777    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
12778    /// separately as appropriate.
    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
        // One-way call with an empty request payload; errors reported here are
        // local encode/send failures only.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1, // method ordinal for SetWeak
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12786
12787    /// This indicates to sysmem that the client is prepared to pay attention to
12788    /// `close_weak_asap`.
12789    ///
12790    /// If sent, this message must be before
12791    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
12792    ///
12793    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
12794    /// send this message before `WaitForAllBuffersAllocated`, or a parent
12795    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
12796    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
12797    /// trigger buffer collection failure.
12798    ///
12799    /// This message is necessary because weak sysmem VMOs have not always been
12800    /// a thing, so older clients are not aware of the need to pay attention to
12801    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
12802    /// sysmem weak VMO handles asap. By having this message and requiring
12803    /// participants to indicate their acceptance of this aspect of the overall
12804    /// protocol, we avoid situations where an older client is delivered a weak
12805    /// VMO without any way for sysmem to get that VMO to close quickly later
12806    /// (and on a per-buffer basis).
12807    ///
12808    /// A participant that doesn't handle `close_weak_asap` and also doesn't
12809    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
12810    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
12811    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
12812    /// same participant has a child/delegate which does retrieve VMOs, that
12813    /// child/delegate will need to send `SetWeakOk` before
12814    /// `WaitForAllBuffersAllocated`.
12815    ///
12816    /// + request `for_child_nodes_also` If present and true, this means direct
12817    ///   child nodes of this node created after this message plus all
12818    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
12819    ///   those nodes. Any child node of this node that was created before this
12820    ///   message is not included. This setting is "sticky" in the sense that a
12821    ///   subsequent `SetWeakOk` without this bool set to true does not reset
12822    ///   the server-side bool. If this creates a problem for a participant, a
12823    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
12824    ///   tokens instead, as appropriate. A participant should only set
12825    ///   `for_child_nodes_also` true if the participant can really promise to
12826    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
12827    ///   weak VMO handles held by participants holding the corresponding child
12828    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
12829    ///   which are using sysmem(1) can be weak, despite the clients of those
12830    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
12831    ///   direct way to find out about `close_weak_asap`. This only applies to
12832    ///   descendents of this `Node` which are using sysmem(1), not to this
12833    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
12834    ///   token, which will fail allocation unless an ancestor of this `Node`
12835    ///   specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // One-way call; the request table is encoded in place (hence `&mut`),
        // consuming any resources it carries.
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9, // method ordinal for SetWeakOk
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12843
12844    /// The server_end will be closed after this `Node` and any child nodes have
12845    /// have released their buffer counts, making those counts available for
12846    /// reservation by a different `Node` via
12847    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
12848    ///
12849    /// The `Node` buffer counts may not be released until the entire tree of
12850    /// `Node`(s) is closed or failed, because
12851    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
12852    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
12853    /// `Node` buffer counts remain reserved until the orphaned node is later
12854    /// cleaned up.
12855    ///
12856    /// If the `Node` exceeds a fairly large number of attached eventpair server
12857    /// ends, a log message will indicate this and the `Node` (and the
12858    /// appropriate) sub-tree will fail.
12859    ///
12860    /// The `server_end` will remain open when
12861    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
12862    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
12863    /// [`fuchsia.sysmem2/BufferCollection`].
12864    ///
12865    /// This message can also be used with a
12866    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call; the request table (which carries the eventpair server
        // end) is encoded in place, transferring its handle to the server.
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac, // method ordinal for AttachNodeTracking
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12877}
12878
#[cfg(target_os = "fuchsia")]
impl From<NodeSynchronousProxy> for zx::Handle {
    /// Converts the proxy into a generic handle by first recovering its
    /// underlying channel and then widening that channel to a `zx::Handle`.
    fn from(value: NodeSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
12885
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for NodeSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy for `fuchsia.sysmem2/Node`.
    fn from(value: fidl::Channel) -> Self {
        NodeSynchronousProxy::new(value)
    }
}
12892
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for NodeSynchronousProxy {
    type Protocol = NodeMarker;

    /// Builds a synchronous proxy from a typed client endpoint by unwrapping
    /// the endpoint's channel.
    fn from_client(value: fidl::endpoints::ClientEnd<NodeMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
12901
/// Asynchronous client proxy for the `fuchsia.sysmem2/Node` protocol.
#[derive(Debug, Clone)]
pub struct NodeProxy {
    // Untyped async FIDL client that performs the actual message transport.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
12906
12907impl fidl::endpoints::Proxy for NodeProxy {
12908    type Protocol = NodeMarker;
12909
12910    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
12911        Self::new(inner)
12912    }
12913
12914    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
12915        self.client.into_channel().map_err(|client| Self { client })
12916    }
12917
12918    fn as_channel(&self) -> &::fidl::AsyncChannel {
12919        self.client.as_channel()
12920    }
12921}
12922
12923impl NodeProxy {
12924    /// Create a new Proxy for fuchsia.sysmem2/Node.
12925    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
12926        let protocol_name = <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
12927        Self { client: fidl::client::Client::new(channel, protocol_name) }
12928    }
12929
12930    /// Get a Stream of events from the remote end of the protocol.
12931    ///
12932    /// # Panics
12933    ///
12934    /// Panics if the event stream was already taken.
12935    pub fn take_event_stream(&self) -> NodeEventStream {
12936        NodeEventStream { event_receiver: self.client.take_event_receiver() }
12937    }
12938
12939    /// Ensure that previous messages have been received server side. This is
12940    /// particularly useful after previous messages that created new tokens,
12941    /// because a token must be known to the sysmem server before sending the
12942    /// token to another participant.
12943    ///
12944    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12945    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12946    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12947    /// to mitigate the possibility of a hostile/fake
12948    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12949    /// Another way is to pass the token to
12950    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
12951    /// the token as part of exchanging it for a
12952    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12953    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12954    /// of stalling.
12955    ///
12956    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12957    /// and then starting and completing a `Sync`, it's then safe to send the
12958    /// `BufferCollectionToken` client ends to other participants knowing the
12959    /// server will recognize the tokens when they're sent by the other
12960    /// participants to sysmem in a
12961    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
12962    /// efficient way to create tokens while avoiding unnecessary round trips.
12963    ///
12964    /// Other options include waiting for each
12965    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
12966    /// individually (using separate call to `Sync` after each), or calling
12967    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
12968    /// converted to a `BufferCollection` via
12969    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
12970    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
12971    /// the sync step and can create multiple tokens at once.
    pub fn r#sync(
        &self,
    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
        // Delegates to the shared NodeProxyInterface implementation; the
        // returned future resolves when the server's reply arrives.
        NodeProxyInterface::r#sync(self)
    }
12977
12978    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
12979    ///
12980    /// Normally a participant will convert a `BufferCollectionToken` into a
12981    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
12982    /// `Release` via the token (and then close the channel immediately or
12983    /// shortly later in response to server closing the server end), which
12984    /// avoids causing buffer collection failure. Without a prior `Release`,
12985    /// closing the `BufferCollectionToken` client end will cause buffer
12986    /// collection failure.
12987    ///
12988    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
12989    ///
12990    /// By default the server handles unexpected closure of a
12991    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
12992    /// first) by failing the buffer collection. Partly this is to expedite
12993    /// closing VMO handles to reclaim memory when any participant fails. If a
12994    /// participant would like to cleanly close a `BufferCollection` without
12995    /// causing buffer collection failure, the participant can send `Release`
12996    /// before closing the `BufferCollection` client end. The `Release` can
12997    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
12998    /// buffer collection won't require constraints from this node in order to
12999    /// allocate. If after `SetConstraints`, the constraints are retained and
13000    /// aggregated, despite the lack of `BufferCollection` connection at the
13001    /// time of constraints aggregation.
13002    ///
13003    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
13004    ///
13005    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
13006    /// end (without `Release` first) will trigger failure of the buffer
13007    /// collection. To close a `BufferCollectionTokenGroup` channel without
13008    /// failing the buffer collection, ensure that AllChildrenPresent() has been
13009    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
13010    /// client end.
13011    ///
13012    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
13014    /// buffer collection will fail (triggered by reception of `Release` without
13015    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
13016    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
13017    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
13018    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
13019    /// close requires `AllChildrenPresent` (if not already sent), then
13020    /// `Release`, then close client end.
13021    ///
13022    /// If `Release` occurs after `AllChildrenPresent`, the children and all
13023    /// their constraints remain intact (just as they would if the
13024    /// `BufferCollectionTokenGroup` channel had remained open), and the client
13025    /// end close doesn't trigger buffer collection failure.
13026    ///
13027    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
13028    ///
13029    /// For brevity, the per-channel-protocol paragraphs above ignore the
13030    /// separate failure domain created by
13031    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
13032    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
13033    /// unexpectedly closes (without `Release` first) and that client end is
13034    /// under a failure domain, instead of failing the whole buffer collection,
13035    /// the failure domain is failed, but the buffer collection itself is
13036    /// isolated from failure of the failure domain. Such failure domains can be
13037    /// nested, in which case only the inner-most failure domain in which the
13038    /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way call delegated to the shared NodeProxyInterface impl.
        NodeProxyInterface::r#release(self)
    }
13042
13043    /// Set a name for VMOs in this buffer collection.
13044    ///
13045    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
13046    /// will be truncated to fit. The name of the vmo will be suffixed with the
13047    /// buffer index within the collection (if the suffix fits within
13048    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
13049    /// listed in the inspect data.
13050    ///
13051    /// The name only affects VMOs allocated after the name is set; this call
13052    /// does not rename existing VMOs. If multiple clients set different names
13053    /// then the larger priority value will win. Setting a new name with the
13054    /// same priority as a prior name doesn't change the name.
13055    ///
13056    /// All table fields are currently required.
13057    ///
13058    /// + request `priority` The name is only set if this is the first `SetName`
13059    ///   or if `priority` is greater than any previous `priority` value in
13060    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
13061    /// + request `name` The name for VMOs created under this buffer collection.
    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        // One-way call delegated to the shared NodeProxyInterface impl.
        NodeProxyInterface::r#set_name(self, payload)
    }
13065
13066    /// Set information about the current client that can be used by sysmem to
13067    /// help diagnose leaking memory and allocation stalls waiting for a
13068    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
13069    ///
13070    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
13071    /// `Node`(s) derived from this `Node`, unless overriden by
13072    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
13073    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
13074    ///
13075    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
13076    /// `Allocator` is the most efficient way to ensure that all
13077    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
13078    /// set, and is also more efficient than separately sending the same debug
13079    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
13080    /// created [`fuchsia.sysmem2/Node`].
13081    ///
13082    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
13083    /// indicate which client is closing their channel first, leading to subtree
13084    /// failure (which can be normal if the purpose of the subtree is over, but
13085    /// if happening earlier than expected, the client-channel-specific name can
13086    /// help diagnose where the failure is first coming from, from sysmem's
13087    /// point of view).
13088    ///
13089    /// All table fields are currently required.
13090    ///
13091    /// + request `name` This can be an arbitrary string, but the current
13092    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
13093    /// + request `id` This can be an arbitrary id, but the current process ID
13094    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call delegated to the shared NodeProxyInterface impl.
        NodeProxyInterface::r#set_debug_client_info(self, payload)
    }
13101
13102    /// Sysmem logs a warning if sysmem hasn't seen
13103    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
13104    /// within 5 seconds after creation of a new collection.
13105    ///
13106    /// Clients can call this method to change when the log is printed. If
13107    /// multiple client set the deadline, it's unspecified which deadline will
13108    /// take effect.
13109    ///
13110    /// In most cases the default works well.
13111    ///
13112    /// All table fields are currently required.
13113    ///
13114    /// + request `deadline` The time at which sysmem will start trying to log
13115    ///   the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call delegated to the shared NodeProxyInterface impl.
        NodeProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
    }
13122
13123    /// This enables verbose logging for the buffer collection.
13124    ///
13125    /// Verbose logging includes constraints set via
13126    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
13127    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
13128    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
13129    /// the tree of `Node`(s).
13130    ///
13131    /// Normally sysmem prints only a single line complaint when aggregation
13132    /// fails, with just the specific detailed reason that aggregation failed,
13133    /// with little surrounding context.  While this is often enough to diagnose
13134    /// a problem if only a small change was made and everything was working
13135    /// before the small change, it's often not particularly helpful for getting
13136    /// a new buffer collection to work for the first time.  Especially with
13137    /// more complex trees of nodes, involving things like
13138    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
13139    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
13140    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
13141    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
13142    /// looks like and why it's failing a logical allocation, or why a tree or
13143    /// subtree is failing sooner than expected.
13144    ///
13145    /// The intent of the extra logging is to be acceptable from a performance
13146    /// point of view, under the assumption that verbose logging is only enabled
13147    /// on a low number of buffer collections. If we're not tracking down a bug,
13148    /// we shouldn't send this message.
    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        // One-way call delegated to the shared NodeProxyInterface impl.
        NodeProxyInterface::r#set_verbose_logging(self)
    }
13152
13153    /// This gets a handle that can be used as a parameter to
13154    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
13155    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
13156    /// client obtained this handle from this `Node`.
13157    ///
13158    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
13159    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
13160    /// despite the two calls typically being on different channels.
13161    ///
13162    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
13163    ///
13164    /// All table fields are currently required.
13165    ///
13166    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
13167    ///   different `Node` channel, to prove that the client obtained the handle
13168    ///   from this `Node`.
13169    pub fn r#get_node_ref(
13170        &self,
13171    ) -> fidl::client::QueryResponseFut<
13172        NodeGetNodeRefResponse,
13173        fidl::encoding::DefaultFuchsiaResourceDialect,
13174    > {
13175        NodeProxyInterface::r#get_node_ref(self)
13176    }
13177
13178    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
13179    /// rooted at a different child token of a common parent
13180    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
13181    /// passed-in `node_ref`.
13182    ///
13183    /// This call is for assisting with admission control de-duplication, and
13184    /// with debugging.
13185    ///
13186    /// The `node_ref` must be obtained using
13187    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
13188    ///
13189    /// The `node_ref` can be a duplicated handle; it's not necessary to call
13190    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
13191    ///
13192    /// If a calling token may not actually be a valid token at all due to a
13193    /// potentially hostile/untrusted provider of the token, call
13194    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
13195    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
13196    /// never responds due to a calling token not being a real token (not really
13197    /// talking to sysmem).  Another option is to call
13198    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
13199    /// which also validates the token along with converting it to a
13200    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
13201    ///
13202    /// All table fields are currently required.
13203    ///
13204    /// - response `is_alternate`
13205    ///   - true: The first parent node in common between the calling node and
13206    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
13207    ///     that the calling `Node` and the `node_ref` `Node` will not have both
13208    ///     their constraints apply - rather sysmem will choose one or the other
13209    ///     of the constraints - never both.  This is because only one child of
13210    ///     a `BufferCollectionTokenGroup` is selected during logical
13211    ///     allocation, with only that one child's subtree contributing to
13212    ///     constraints aggregation.
13213    ///   - false: The first parent node in common between the calling `Node`
13214    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
13215    ///     Currently, this means the first parent node in common is a
13216    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
13217    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
13218    ///     `Node` may have both their constraints apply during constraints
13219    ///     aggregation of the logical allocation, if both `Node`(s) are
13220    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
13221    ///     this case, there is no `BufferCollectionTokenGroup` that will
13222    ///     directly prevent the two `Node`(s) from both being selected and
13223    ///     their constraints both aggregated, but even when false, one or both
13224    ///     `Node`(s) may still be eliminated from consideration if one or both
13225    ///     `Node`(s) has a direct or indirect parent
13226    ///     `BufferCollectionTokenGroup` which selects a child subtree other
13227    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
13228    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
13229    ///   associated with the same buffer collection as the calling `Node`.
13230    ///   Another reason for this error is if the `node_ref` is an
13231    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
13232    ///   a real `node_ref` obtained from `GetNodeRef`.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
    ///   the needed rights expected on a real `node_ref`.
13236    /// * No other failing status codes are returned by this call.  However,
13237    ///   sysmem may add additional codes in future, so the client should have
13238    ///   sensible default handling for any failing status code.
13239    pub fn r#is_alternate_for(
13240        &self,
13241        mut payload: NodeIsAlternateForRequest,
13242    ) -> fidl::client::QueryResponseFut<
13243        NodeIsAlternateForResult,
13244        fidl::encoding::DefaultFuchsiaResourceDialect,
13245    > {
13246        NodeProxyInterface::r#is_alternate_for(self, payload)
13247    }
13248
13249    /// Get the buffer collection ID. This ID is also available from
13250    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
13251    /// within the collection).
13252    ///
13253    /// This call is mainly useful in situations where we can't convey a
13254    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
13255    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
13256    /// handle, which can be joined back up with a `BufferCollection` client end
13257    /// that was created via a different path. Prefer to convey a
13258    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
13259    ///
13260    /// Trusting a `buffer_collection_id` value from a source other than sysmem
13261    /// is analogous to trusting a koid value from a source other than zircon.
13262    /// Both should be avoided unless really necessary, and both require
13263    /// caution. In some situations it may be reasonable to refer to a
13264    /// pre-established `BufferCollection` by `buffer_collection_id` via a
13265    /// protocol for efficiency reasons, but an incoming value purporting to be
13266    /// a `buffer_collection_id` is not sufficient alone to justify granting the
13267    /// sender of the `buffer_collection_id` any capability. The sender must
13268    /// first prove to a receiver that the sender has/had a VMO or has/had a
13269    /// `BufferCollectionToken` to the same collection by sending a handle that
13270    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
13271    /// `buffer_collection_id` value. The receiver should take care to avoid
13272    /// assuming that a sender had a `BufferCollectionToken` in cases where the
13273    /// sender has only proven that the sender had a VMO.
13274    ///
13275    /// - response `buffer_collection_id` This ID is unique per buffer
13276    ///   collection per boot. Each buffer is uniquely identified by the
13277    ///   `buffer_collection_id` and `buffer_index` together.
13278    pub fn r#get_buffer_collection_id(
13279        &self,
13280    ) -> fidl::client::QueryResponseFut<
13281        NodeGetBufferCollectionIdResponse,
13282        fidl::encoding::DefaultFuchsiaResourceDialect,
13283    > {
13284        NodeProxyInterface::r#get_buffer_collection_id(self)
13285    }
13286
13287    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
13288    /// created after this message to weak, which means that a client's `Node`
13289    /// client end (or a child created after this message) is not alone
13290    /// sufficient to keep allocated VMOs alive.
13291    ///
13292    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
13293    /// `close_weak_asap`.
13294    ///
13295    /// This message is only permitted before the `Node` becomes ready for
13296    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
13297    ///   * `BufferCollectionToken`: any time
13298    ///   * `BufferCollection`: before `SetConstraints`
13299    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
13300    ///
13301    /// Currently, no conversion from strong `Node` to weak `Node` after ready
13302    /// for allocation is provided, but a client can simulate that by creating
13303    /// an additional `Node` before allocation and setting that additional
13304    /// `Node` to weak, and then potentially at some point later sending
13305    /// `Release` and closing the client end of the client's strong `Node`, but
13306    /// keeping the client's weak `Node`.
13307    ///
13308    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
13309    /// collection failure (all `Node` client end(s) will see
13310    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
13311    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
13312    /// this situation until all `Node`(s) are ready for allocation. For initial
13313    /// allocation to succeed, at least one strong `Node` is required to exist
13314    /// at allocation time, but after that client receives VMO handles, that
13315    /// client can `BufferCollection.Release` and close the client end without
13316    /// causing this type of failure.
13317    ///
13318    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
13319    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
13320    /// separately as appropriate.
13321    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
13322        NodeProxyInterface::r#set_weak(self)
13323    }
13324
13325    /// This indicates to sysmem that the client is prepared to pay attention to
13326    /// `close_weak_asap`.
13327    ///
13328    /// If sent, this message must be before
13329    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
13330    ///
13331    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
13332    /// send this message before `WaitForAllBuffersAllocated`, or a parent
13333    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
13334    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
13335    /// trigger buffer collection failure.
13336    ///
13337    /// This message is necessary because weak sysmem VMOs have not always been
13338    /// a thing, so older clients are not aware of the need to pay attention to
13339    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
13340    /// sysmem weak VMO handles asap. By having this message and requiring
13341    /// participants to indicate their acceptance of this aspect of the overall
13342    /// protocol, we avoid situations where an older client is delivered a weak
13343    /// VMO without any way for sysmem to get that VMO to close quickly later
13344    /// (and on a per-buffer basis).
13345    ///
13346    /// A participant that doesn't handle `close_weak_asap` and also doesn't
13347    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
13348    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
13349    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
13350    /// same participant has a child/delegate which does retrieve VMOs, that
13351    /// child/delegate will need to send `SetWeakOk` before
13352    /// `WaitForAllBuffersAllocated`.
13353    ///
13354    /// + request `for_child_nodes_also` If present and true, this means direct
13355    ///   child nodes of this node created after this message plus all
13356    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
13357    ///   those nodes. Any child node of this node that was created before this
13358    ///   message is not included. This setting is "sticky" in the sense that a
13359    ///   subsequent `SetWeakOk` without this bool set to true does not reset
13360    ///   the server-side bool. If this creates a problem for a participant, a
13361    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
13362    ///   tokens instead, as appropriate. A participant should only set
13363    ///   `for_child_nodes_also` true if the participant can really promise to
13364    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
13365    ///   weak VMO handles held by participants holding the corresponding child
13366    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
13367    ///   which are using sysmem(1) can be weak, despite the clients of those
13368    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
13369    ///   direct way to find out about `close_weak_asap`. This only applies to
13370    ///   descendents of this `Node` which are using sysmem(1), not to this
13371    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
13372    ///   token, which will fail allocation unless an ancestor of this `Node`
13373    ///   specified `for_child_nodes_also` true.
13374    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
13375        NodeProxyInterface::r#set_weak_ok(self, payload)
13376    }
13377
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
13380    /// reservation by a different `Node` via
13381    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
13382    ///
13383    /// The `Node` buffer counts may not be released until the entire tree of
13384    /// `Node`(s) is closed or failed, because
13385    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
13386    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
13387    /// `Node` buffer counts remain reserved until the orphaned node is later
13388    /// cleaned up.
13389    ///
13390    /// If the `Node` exceeds a fairly large number of attached eventpair server
13391    /// ends, a log message will indicate this and the `Node` (and the
13392    /// appropriate) sub-tree will fail.
13393    ///
13394    /// The `server_end` will remain open when
13395    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
13396    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
13397    /// [`fuchsia.sysmem2/BufferCollection`].
13398    ///
13399    /// This message can also be used with a
13400    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
13401    pub fn r#attach_node_tracking(
13402        &self,
13403        mut payload: NodeAttachNodeTrackingRequest,
13404    ) -> Result<(), fidl::Error> {
13405        NodeProxyInterface::r#attach_node_tracking(self, payload)
13406    }
13407}
13408
// Client-side wire implementation of the `Node` protocol for `NodeProxy`.
//
// Pattern shared by all methods below:
//   * one-way methods call `Client::send` with the method's 64-bit ordinal
//     and `DynamicFlags::FLEXIBLE`;
//   * two-way methods define a local `_decode` helper and call
//     `send_query_and_decode`, passing the same ordinal to both the send and
//     the decode so the reply is validated against the request's method.
impl NodeProxyInterface for NodeProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    // Two-way `Sync`: empty request, flexible empty-struct response.
    fn r#sync(&self) -> Self::SyncResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<NodeMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `Release`: no payload, no reply.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetName`: request table passed by shared reference.
    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugClientInfo`.
    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugTimeoutLogDeadline`.
    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetVerboseLogging`: no payload.
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetNodeRef`: empty request, table response.
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<NodeMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `IsAlternateFor`: fallible method, so the body decodes through
    // `FlexibleResultType<success, Error>` into a `Result`.
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<NodeMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        // NOTE(review): the request is passed as `&mut` here (unlike the
        // value-table methods above) — presumably so encoding can move
        // resource handles out of it; confirm against the fidl crate API.
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetBufferCollectionId`: empty request, table response.
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<NodeMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `SetWeak`: no payload.
    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetWeakOk`: request taken by value and sent as `&mut`.
    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `AttachNodeTracking`: request taken by value and sent as `&mut`.
    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
13583
/// Stream of events arriving on a `Node` client channel.
pub struct NodeEventStream {
    // Receives raw message buffers from the channel; they are decoded into
    // `NodeEvent` values by this type's `futures::Stream` implementation.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
13587
13588impl std::marker::Unpin for NodeEventStream {}
13589
13590impl futures::stream::FusedStream for NodeEventStream {
13591    fn is_terminated(&self) -> bool {
13592        self.event_receiver.is_terminated()
13593    }
13594}
13595
13596impl futures::Stream for NodeEventStream {
13597    type Item = Result<NodeEvent, fidl::Error>;
13598
13599    fn poll_next(
13600        mut self: std::pin::Pin<&mut Self>,
13601        cx: &mut std::task::Context<'_>,
13602    ) -> std::task::Poll<Option<Self::Item>> {
13603        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
13604            &mut self.event_receiver,
13605            cx
13606        )?) {
13607            Some(buf) => std::task::Poll::Ready(Some(NodeEvent::decode(buf))),
13608            None => std::task::Poll::Ready(None),
13609        }
13610    }
13611}
13612
/// Events delivered by the `Node` protocol. The decoder below produces only
/// the catch-all variant for unknown flexible events.
#[derive(Debug)]
pub enum NodeEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
13621
13622impl NodeEvent {
13623    /// Decodes a message buffer as a [`NodeEvent`].
13624    fn decode(
13625        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
13626    ) -> Result<NodeEvent, fidl::Error> {
13627        let (bytes, _handles) = buf.split_mut();
13628        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
13629        debug_assert_eq!(tx_header.tx_id, 0);
13630        match tx_header.ordinal {
13631            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
13632                Ok(NodeEvent::_UnknownEvent { ordinal: tx_header.ordinal })
13633            }
13634            _ => Err(fidl::Error::UnknownOrdinal {
13635                ordinal: tx_header.ordinal,
13636                protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
13637            }),
13638        }
13639    }
13640}
13641
/// A Stream of incoming requests for fuchsia.sysmem2/Node.
pub struct NodeRequestStream {
    // Shared server-side channel state; also cloned into control handles.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel shuts down or peer-closes; polling after that panics.
    is_terminated: bool,
}
13647
13648impl std::marker::Unpin for NodeRequestStream {}
13649
13650impl futures::stream::FusedStream for NodeRequestStream {
13651    fn is_terminated(&self) -> bool {
13652        self.is_terminated
13653    }
13654}
13655
13656impl fidl::endpoints::RequestStream for NodeRequestStream {
13657    type Protocol = NodeMarker;
13658    type ControlHandle = NodeControlHandle;
13659
13660    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
13661        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
13662    }
13663
13664    fn control_handle(&self) -> Self::ControlHandle {
13665        NodeControlHandle { inner: self.inner.clone() }
13666    }
13667
13668    fn into_inner(
13669        self,
13670    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
13671    {
13672        (self.inner, self.is_terminated)
13673    }
13674
13675    fn from_inner(
13676        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
13677        is_terminated: bool,
13678    ) -> Self {
13679        Self { inner, is_terminated }
13680    }
13681}
13682
13683impl futures::Stream for NodeRequestStream {
13684    type Item = Result<NodeRequest, fidl::Error>;
13685
13686    fn poll_next(
13687        mut self: std::pin::Pin<&mut Self>,
13688        cx: &mut std::task::Context<'_>,
13689    ) -> std::task::Poll<Option<Self::Item>> {
13690        let this = &mut *self;
13691        if this.inner.check_shutdown(cx) {
13692            this.is_terminated = true;
13693            return std::task::Poll::Ready(None);
13694        }
13695        if this.is_terminated {
13696            panic!("polled NodeRequestStream after completion");
13697        }
13698        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
13699            |bytes, handles| {
13700                match this.inner.channel().read_etc(cx, bytes, handles) {
13701                    std::task::Poll::Ready(Ok(())) => {}
13702                    std::task::Poll::Pending => return std::task::Poll::Pending,
13703                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
13704                        this.is_terminated = true;
13705                        return std::task::Poll::Ready(None);
13706                    }
13707                    std::task::Poll::Ready(Err(e)) => {
13708                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
13709                            e.into(),
13710                        ))));
13711                    }
13712                }
13713
13714                // A message has been received from the channel
13715                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
13716
13717                std::task::Poll::Ready(Some(match header.ordinal {
13718                    0x11ac2555cf575b54 => {
13719                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13720                        let mut req = fidl::new_empty!(
13721                            fidl::encoding::EmptyPayload,
13722                            fidl::encoding::DefaultFuchsiaResourceDialect
13723                        );
13724                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13725                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13726                        Ok(NodeRequest::Sync {
13727                            responder: NodeSyncResponder {
13728                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13729                                tx_id: header.tx_id,
13730                            },
13731                        })
13732                    }
13733                    0x6a5cae7d6d6e04c6 => {
13734                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13735                        let mut req = fidl::new_empty!(
13736                            fidl::encoding::EmptyPayload,
13737                            fidl::encoding::DefaultFuchsiaResourceDialect
13738                        );
13739                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13740                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13741                        Ok(NodeRequest::Release { control_handle })
13742                    }
13743                    0xb41f1624f48c1e9 => {
13744                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13745                        let mut req = fidl::new_empty!(
13746                            NodeSetNameRequest,
13747                            fidl::encoding::DefaultFuchsiaResourceDialect
13748                        );
13749                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
13750                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13751                        Ok(NodeRequest::SetName { payload: req, control_handle })
13752                    }
13753                    0x5cde8914608d99b1 => {
13754                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13755                        let mut req = fidl::new_empty!(
13756                            NodeSetDebugClientInfoRequest,
13757                            fidl::encoding::DefaultFuchsiaResourceDialect
13758                        );
13759                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
13760                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13761                        Ok(NodeRequest::SetDebugClientInfo { payload: req, control_handle })
13762                    }
13763                    0x716b0af13d5c0806 => {
13764                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13765                        let mut req = fidl::new_empty!(
13766                            NodeSetDebugTimeoutLogDeadlineRequest,
13767                            fidl::encoding::DefaultFuchsiaResourceDialect
13768                        );
13769                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
13770                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13771                        Ok(NodeRequest::SetDebugTimeoutLogDeadline { payload: req, control_handle })
13772                    }
13773                    0x5209c77415b4dfad => {
13774                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13775                        let mut req = fidl::new_empty!(
13776                            fidl::encoding::EmptyPayload,
13777                            fidl::encoding::DefaultFuchsiaResourceDialect
13778                        );
13779                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13780                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13781                        Ok(NodeRequest::SetVerboseLogging { control_handle })
13782                    }
13783                    0x5b3d0e51614df053 => {
13784                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13785                        let mut req = fidl::new_empty!(
13786                            fidl::encoding::EmptyPayload,
13787                            fidl::encoding::DefaultFuchsiaResourceDialect
13788                        );
13789                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13790                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13791                        Ok(NodeRequest::GetNodeRef {
13792                            responder: NodeGetNodeRefResponder {
13793                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13794                                tx_id: header.tx_id,
13795                            },
13796                        })
13797                    }
13798                    0x3a58e00157e0825 => {
13799                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13800                        let mut req = fidl::new_empty!(
13801                            NodeIsAlternateForRequest,
13802                            fidl::encoding::DefaultFuchsiaResourceDialect
13803                        );
13804                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
13805                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13806                        Ok(NodeRequest::IsAlternateFor {
13807                            payload: req,
13808                            responder: NodeIsAlternateForResponder {
13809                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13810                                tx_id: header.tx_id,
13811                            },
13812                        })
13813                    }
13814                    0x77d19a494b78ba8c => {
13815                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13816                        let mut req = fidl::new_empty!(
13817                            fidl::encoding::EmptyPayload,
13818                            fidl::encoding::DefaultFuchsiaResourceDialect
13819                        );
13820                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13821                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13822                        Ok(NodeRequest::GetBufferCollectionId {
13823                            responder: NodeGetBufferCollectionIdResponder {
13824                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13825                                tx_id: header.tx_id,
13826                            },
13827                        })
13828                    }
13829                    0x22dd3ea514eeffe1 => {
13830                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13831                        let mut req = fidl::new_empty!(
13832                            fidl::encoding::EmptyPayload,
13833                            fidl::encoding::DefaultFuchsiaResourceDialect
13834                        );
13835                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13836                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13837                        Ok(NodeRequest::SetWeak { control_handle })
13838                    }
13839                    0x38a44fc4d7724be9 => {
13840                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13841                        let mut req = fidl::new_empty!(
13842                            NodeSetWeakOkRequest,
13843                            fidl::encoding::DefaultFuchsiaResourceDialect
13844                        );
13845                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
13846                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13847                        Ok(NodeRequest::SetWeakOk { payload: req, control_handle })
13848                    }
13849                    0x3f22f2a293d3cdac => {
13850                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13851                        let mut req = fidl::new_empty!(
13852                            NodeAttachNodeTrackingRequest,
13853                            fidl::encoding::DefaultFuchsiaResourceDialect
13854                        );
13855                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
13856                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13857                        Ok(NodeRequest::AttachNodeTracking { payload: req, control_handle })
13858                    }
13859                    _ if header.tx_id == 0
13860                        && header
13861                            .dynamic_flags()
13862                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
13863                    {
13864                        Ok(NodeRequest::_UnknownMethod {
13865                            ordinal: header.ordinal,
13866                            control_handle: NodeControlHandle { inner: this.inner.clone() },
13867                            method_type: fidl::MethodType::OneWay,
13868                        })
13869                    }
13870                    _ if header
13871                        .dynamic_flags()
13872                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
13873                    {
13874                        this.inner.send_framework_err(
13875                            fidl::encoding::FrameworkErr::UnknownMethod,
13876                            header.tx_id,
13877                            header.ordinal,
13878                            header.dynamic_flags(),
13879                            (bytes, handles),
13880                        )?;
13881                        Ok(NodeRequest::_UnknownMethod {
13882                            ordinal: header.ordinal,
13883                            control_handle: NodeControlHandle { inner: this.inner.clone() },
13884                            method_type: fidl::MethodType::TwoWay,
13885                        })
13886                    }
13887                    _ => Err(fidl::Error::UnknownOrdinal {
13888                        ordinal: header.ordinal,
13889                        protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
13890                    }),
13891                }))
13892            },
13893        )
13894    }
13895}
13896
13897/// This protocol is the parent protocol for all nodes in the tree established
13898/// by [`fuchsia.sysmem2/BufferCollectionToken`] creation and
13899/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] creation, including
13900/// [`fuchsia.sysmem2/BufferCollectionToken`](s) which have since been converted
13901/// to a [`fuchsia.sysmem2/BufferCollection`] channel.
13902///
13903/// Epitaphs are not used in this protocol.
13904#[derive(Debug)]
13905pub enum NodeRequest {
13906    /// Ensure that previous messages have been received server side. This is
13907    /// particularly useful after previous messages that created new tokens,
13908    /// because a token must be known to the sysmem server before sending the
13909    /// token to another participant.
13910    ///
13911    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
13912    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
13913    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
13914    /// to mitigate the possibility of a hostile/fake
13915    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
13916    /// Another way is to pass the token to
13917    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
13918    /// the token as part of exchanging it for a
13919    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
13920    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
13921    /// of stalling.
13922    ///
13923    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
13924    /// and then starting and completing a `Sync`, it's then safe to send the
13925    /// `BufferCollectionToken` client ends to other participants knowing the
13926    /// server will recognize the tokens when they're sent by the other
13927    /// participants to sysmem in a
13928    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
13929    /// efficient way to create tokens while avoiding unnecessary round trips.
13930    ///
13931    /// Other options include waiting for each
13932    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
13933    /// individually (using separate call to `Sync` after each), or calling
13934    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
13935    /// converted to a `BufferCollection` via
13936    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
13937    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
13938    /// the sync step and can create multiple tokens at once.
13939    Sync { responder: NodeSyncResponder },
13940    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
13941    ///
13942    /// Normally a participant will convert a `BufferCollectionToken` into a
13943    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
13944    /// `Release` via the token (and then close the channel immediately or
13945    /// shortly later in response to server closing the server end), which
13946    /// avoids causing buffer collection failure. Without a prior `Release`,
13947    /// closing the `BufferCollectionToken` client end will cause buffer
13948    /// collection failure.
13949    ///
13950    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
13951    ///
13952    /// By default the server handles unexpected closure of a
13953    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
13954    /// first) by failing the buffer collection. Partly this is to expedite
13955    /// closing VMO handles to reclaim memory when any participant fails. If a
13956    /// participant would like to cleanly close a `BufferCollection` without
13957    /// causing buffer collection failure, the participant can send `Release`
13958    /// before closing the `BufferCollection` client end. The `Release` can
13959    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
13960    /// buffer collection won't require constraints from this node in order to
13961    /// allocate. If after `SetConstraints`, the constraints are retained and
13962    /// aggregated, despite the lack of `BufferCollection` connection at the
13963    /// time of constraints aggregation.
13964    ///
13965    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
13966    ///
13967    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
13968    /// end (without `Release` first) will trigger failure of the buffer
13969    /// collection. To close a `BufferCollectionTokenGroup` channel without
13970    /// failing the buffer collection, ensure that AllChildrenPresent() has been
13971    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
13972    /// client end.
13973    ///
13974    /// If `Release` occurs before
13975    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
13976    /// buffer collection will fail (triggered by reception of `Release` without
13977    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
13978    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
13979    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
13980    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
13981    /// close requires `AllChildrenPresent` (if not already sent), then
13982    /// `Release`, then close client end.
13983    ///
13984    /// If `Release` occurs after `AllChildrenPresent`, the children and all
13985    /// their constraints remain intact (just as they would if the
13986    /// `BufferCollectionTokenGroup` channel had remained open), and the client
13987    /// end close doesn't trigger buffer collection failure.
13988    ///
13989    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
13990    ///
13991    /// For brevity, the per-channel-protocol paragraphs above ignore the
13992    /// separate failure domain created by
13993    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
13994    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
13995    /// unexpectedly closes (without `Release` first) and that client end is
13996    /// under a failure domain, instead of failing the whole buffer collection,
13997    /// the failure domain is failed, but the buffer collection itself is
13998    /// isolated from failure of the failure domain. Such failure domains can be
13999    /// nested, in which case only the inner-most failure domain in which the
14000    /// `Node` resides fails.
14001    Release { control_handle: NodeControlHandle },
14002    /// Set a name for VMOs in this buffer collection.
14003    ///
14004    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
14005    /// will be truncated to fit. The name of the vmo will be suffixed with the
14006    /// buffer index within the collection (if the suffix fits within
14007    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
14008    /// listed in the inspect data.
14009    ///
14010    /// The name only affects VMOs allocated after the name is set; this call
14011    /// does not rename existing VMOs. If multiple clients set different names
14012    /// then the larger priority value will win. Setting a new name with the
14013    /// same priority as a prior name doesn't change the name.
14014    ///
14015    /// All table fields are currently required.
14016    ///
14017    /// + request `priority` The name is only set if this is the first `SetName`
14018    ///   or if `priority` is greater than any previous `priority` value in
14019    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
14020    /// + request `name` The name for VMOs created under this buffer collection.
14021    SetName { payload: NodeSetNameRequest, control_handle: NodeControlHandle },
14022    /// Set information about the current client that can be used by sysmem to
14023    /// help diagnose leaking memory and allocation stalls waiting for a
14024    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
14025    ///
14026    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
14027    /// `Node`(s) derived from this `Node`, unless overridden by
14028    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
14029    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
14030    ///
14031    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
14032    /// `Allocator` is the most efficient way to ensure that all
14033    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
14034    /// set, and is also more efficient than separately sending the same debug
14035    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
14036    /// created [`fuchsia.sysmem2/Node`].
14037    ///
14038    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
14039    /// indicate which client is closing their channel first, leading to subtree
14040    /// failure (which can be normal if the purpose of the subtree is over, but
14041    /// if happening earlier than expected, the client-channel-specific name can
14042    /// help diagnose where the failure is first coming from, from sysmem's
14043    /// point of view).
14044    ///
14045    /// All table fields are currently required.
14046    ///
14047    /// + request `name` This can be an arbitrary string, but the current
14048    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
14049    /// + request `id` This can be an arbitrary id, but the current process ID
14050    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
14051    SetDebugClientInfo { payload: NodeSetDebugClientInfoRequest, control_handle: NodeControlHandle },
14052    /// Sysmem logs a warning if sysmem hasn't seen
14053    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
14054    /// within 5 seconds after creation of a new collection.
14055    ///
14056    /// Clients can call this method to change when the log is printed. If
14057    /// multiple clients set the deadline, it's unspecified which deadline will
14058    /// take effect.
14059    ///
14060    /// In most cases the default works well.
14061    ///
14062    /// All table fields are currently required.
14063    ///
14064    /// + request `deadline` The time at which sysmem will start trying to log
14065    ///   the warning, unless all constraints are with sysmem by then.
14066    SetDebugTimeoutLogDeadline {
14067        payload: NodeSetDebugTimeoutLogDeadlineRequest,
14068        control_handle: NodeControlHandle,
14069    },
14070    /// This enables verbose logging for the buffer collection.
14071    ///
14072    /// Verbose logging includes constraints set via
14073    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
14074    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
14075    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
14076    /// the tree of `Node`(s).
14077    ///
14078    /// Normally sysmem prints only a single line complaint when aggregation
14079    /// fails, with just the specific detailed reason that aggregation failed,
14080    /// with little surrounding context.  While this is often enough to diagnose
14081    /// a problem if only a small change was made and everything was working
14082    /// before the small change, it's often not particularly helpful for getting
14083    /// a new buffer collection to work for the first time.  Especially with
14084    /// more complex trees of nodes, involving things like
14085    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
14086    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
14087    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
14088    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
14089    /// looks like and why it's failing a logical allocation, or why a tree or
14090    /// subtree is failing sooner than expected.
14091    ///
14092    /// The intent of the extra logging is to be acceptable from a performance
14093    /// point of view, under the assumption that verbose logging is only enabled
14094    /// on a low number of buffer collections. If we're not tracking down a bug,
14095    /// we shouldn't send this message.
14096    SetVerboseLogging { control_handle: NodeControlHandle },
14097    /// This gets a handle that can be used as a parameter to
14098    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
14099    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
14100    /// client obtained this handle from this `Node`.
14101    ///
14102    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
14103    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
14104    /// despite the two calls typically being on different channels.
14105    ///
14106    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
14107    ///
14108    /// All table fields are currently required.
14109    ///
14110    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
14111    ///   different `Node` channel, to prove that the client obtained the handle
14112    ///   from this `Node`.
14113    GetNodeRef { responder: NodeGetNodeRefResponder },
14114    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
14115    /// rooted at a different child token of a common parent
14116    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
14117    /// passed-in `node_ref`.
14118    ///
14119    /// This call is for assisting with admission control de-duplication, and
14120    /// with debugging.
14121    ///
14122    /// The `node_ref` must be obtained using
14123    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
14124    ///
14125    /// The `node_ref` can be a duplicated handle; it's not necessary to call
14126    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
14127    ///
14128    /// If a calling token may not actually be a valid token at all due to a
14129    /// potentially hostile/untrusted provider of the token, call
14130    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
14131    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
14132    /// never responds due to a calling token not being a real token (not really
14133    /// talking to sysmem).  Another option is to call
14134    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
14135    /// which also validates the token along with converting it to a
14136    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
14137    ///
14138    /// All table fields are currently required.
14139    ///
14140    /// - response `is_alternate`
14141    ///   - true: The first parent node in common between the calling node and
14142    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
14143    ///     that the calling `Node` and the `node_ref` `Node` will not have both
14144    ///     their constraints apply - rather sysmem will choose one or the other
14145    ///     of the constraints - never both.  This is because only one child of
14146    ///     a `BufferCollectionTokenGroup` is selected during logical
14147    ///     allocation, with only that one child's subtree contributing to
14148    ///     constraints aggregation.
14149    ///   - false: The first parent node in common between the calling `Node`
14150    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
14151    ///     Currently, this means the first parent node in common is a
14152    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
14153    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
14154    ///     `Node` may have both their constraints apply during constraints
14155    ///     aggregation of the logical allocation, if both `Node`(s) are
14156    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
14157    ///     this case, there is no `BufferCollectionTokenGroup` that will
14158    ///     directly prevent the two `Node`(s) from both being selected and
14159    ///     their constraints both aggregated, but even when false, one or both
14160    ///     `Node`(s) may still be eliminated from consideration if one or both
14161    ///     `Node`(s) has a direct or indirect parent
14162    ///     `BufferCollectionTokenGroup` which selects a child subtree other
14163    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
14164    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
14165    ///   associated with the same buffer collection as the calling `Node`.
14166    ///   Another reason for this error is if the `node_ref` is an
14167    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
14168    ///   a real `node_ref` obtained from `GetNodeRef`.
14169    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
14170    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
14171    ///   the needed rights expected on a real `node_ref`.
14172    /// * No other failing status codes are returned by this call.  However,
14173    ///   sysmem may add additional codes in future, so the client should have
14174    ///   sensible default handling for any failing status code.
14175    IsAlternateFor { payload: NodeIsAlternateForRequest, responder: NodeIsAlternateForResponder },
14176    /// Get the buffer collection ID. This ID is also available from
14177    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
14178    /// within the collection).
14179    ///
14180    /// This call is mainly useful in situations where we can't convey a
14181    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
14182    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
14183    /// handle, which can be joined back up with a `BufferCollection` client end
14184    /// that was created via a different path. Prefer to convey a
14185    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
14186    ///
14187    /// Trusting a `buffer_collection_id` value from a source other than sysmem
14188    /// is analogous to trusting a koid value from a source other than zircon.
14189    /// Both should be avoided unless really necessary, and both require
14190    /// caution. In some situations it may be reasonable to refer to a
14191    /// pre-established `BufferCollection` by `buffer_collection_id` via a
14192    /// protocol for efficiency reasons, but an incoming value purporting to be
14193    /// a `buffer_collection_id` is not sufficient alone to justify granting the
14194    /// sender of the `buffer_collection_id` any capability. The sender must
14195    /// first prove to a receiver that the sender has/had a VMO or has/had a
14196    /// `BufferCollectionToken` to the same collection by sending a handle that
14197    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
14198    /// `buffer_collection_id` value. The receiver should take care to avoid
14199    /// assuming that a sender had a `BufferCollectionToken` in cases where the
14200    /// sender has only proven that the sender had a VMO.
14201    ///
14202    /// - response `buffer_collection_id` This ID is unique per buffer
14203    ///   collection per boot. Each buffer is uniquely identified by the
14204    ///   `buffer_collection_id` and `buffer_index` together.
14205    GetBufferCollectionId { responder: NodeGetBufferCollectionIdResponder },
14206    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
14207    /// created after this message to weak, which means that a client's `Node`
14208    /// client end (or a child created after this message) is not alone
14209    /// sufficient to keep allocated VMOs alive.
14210    ///
14211    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
14212    /// `close_weak_asap`.
14213    ///
14214    /// This message is only permitted before the `Node` becomes ready for
14215    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
14216    ///   * `BufferCollectionToken`: any time
14217    ///   * `BufferCollection`: before `SetConstraints`
14218    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
14219    ///
14220    /// Currently, no conversion from strong `Node` to weak `Node` after ready
14221    /// for allocation is provided, but a client can simulate that by creating
14222    /// an additional `Node` before allocation and setting that additional
14223    /// `Node` to weak, and then potentially at some point later sending
14224    /// `Release` and closing the client end of the client's strong `Node`, but
14225    /// keeping the client's weak `Node`.
14226    ///
14227    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
14228    /// collection failure (all `Node` client end(s) will see
14229    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
14230    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
14231    /// this situation until all `Node`(s) are ready for allocation. For initial
14232    /// allocation to succeed, at least one strong `Node` is required to exist
14233    /// at allocation time, but after that client receives VMO handles, that
14234    /// client can `BufferCollection.Release` and close the client end without
14235    /// causing this type of failure.
14236    ///
14237    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
14238    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
14239    /// separately as appropriate.
14240    SetWeak { control_handle: NodeControlHandle },
14241    /// This indicates to sysmem that the client is prepared to pay attention to
14242    /// `close_weak_asap`.
14243    ///
14244    /// If sent, this message must be before
14245    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
14246    ///
14247    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
14248    /// send this message before `WaitForAllBuffersAllocated`, or a parent
14249    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
14250    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
14251    /// trigger buffer collection failure.
14252    ///
14253    /// This message is necessary because weak sysmem VMOs have not always been
14254    /// a thing, so older clients are not aware of the need to pay attention to
14255    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
14256    /// sysmem weak VMO handles asap. By having this message and requiring
14257    /// participants to indicate their acceptance of this aspect of the overall
14258    /// protocol, we avoid situations where an older client is delivered a weak
14259    /// VMO without any way for sysmem to get that VMO to close quickly later
14260    /// (and on a per-buffer basis).
14261    ///
14262    /// A participant that doesn't handle `close_weak_asap` and also doesn't
14263    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
14264    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
14265    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
14266    /// same participant has a child/delegate which does retrieve VMOs, that
14267    /// child/delegate will need to send `SetWeakOk` before
14268    /// `WaitForAllBuffersAllocated`.
14269    ///
14270    /// + request `for_child_nodes_also` If present and true, this means direct
14271    ///   child nodes of this node created after this message plus all
14272    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
14273    ///   those nodes. Any child node of this node that was created before this
14274    ///   message is not included. This setting is "sticky" in the sense that a
14275    ///   subsequent `SetWeakOk` without this bool set to true does not reset
14276    ///   the server-side bool. If this creates a problem for a participant, a
14277    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
14278    ///   tokens instead, as appropriate. A participant should only set
14279    ///   `for_child_nodes_also` true if the participant can really promise to
14280    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
14281    ///   weak VMO handles held by participants holding the corresponding child
14282    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
14283    ///   which are using sysmem(1) can be weak, despite the clients of those
14284    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
14285    ///   direct way to find out about `close_weak_asap`. This only applies to
14286    ///   descendents of this `Node` which are using sysmem(1), not to this
14287    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
14288    ///   token, which will fail allocation unless an ancestor of this `Node`
14289    ///   specified `for_child_nodes_also` true.
14290    SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: NodeControlHandle },
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
14293    /// reservation by a different `Node` via
14294    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
14295    ///
14296    /// The `Node` buffer counts may not be released until the entire tree of
14297    /// `Node`(s) is closed or failed, because
14298    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
14299    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
14300    /// `Node` buffer counts remain reserved until the orphaned node is later
14301    /// cleaned up.
14302    ///
14303    /// If the `Node` exceeds a fairly large number of attached eventpair server
14304    /// ends, a log message will indicate this and the `Node` (and the
14305    /// appropriate) sub-tree will fail.
14306    ///
14307    /// The `server_end` will remain open when
14308    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
14309    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
14310    /// [`fuchsia.sysmem2/BufferCollection`].
14311    ///
14312    /// This message can also be used with a
14313    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
14314    AttachNodeTracking { payload: NodeAttachNodeTrackingRequest, control_handle: NodeControlHandle },
14315    /// An interaction was received which does not match any known method.
14316    #[non_exhaustive]
14317    _UnknownMethod {
14318        /// Ordinal of the method that was called.
14319        ordinal: u64,
14320        control_handle: NodeControlHandle,
14321        method_type: fidl::MethodType,
14322    },
14323}
14324
14325impl NodeRequest {
14326    #[allow(irrefutable_let_patterns)]
14327    pub fn into_sync(self) -> Option<(NodeSyncResponder)> {
14328        if let NodeRequest::Sync { responder } = self { Some((responder)) } else { None }
14329    }
14330
14331    #[allow(irrefutable_let_patterns)]
14332    pub fn into_release(self) -> Option<(NodeControlHandle)> {
14333        if let NodeRequest::Release { control_handle } = self {
14334            Some((control_handle))
14335        } else {
14336            None
14337        }
14338    }
14339
14340    #[allow(irrefutable_let_patterns)]
14341    pub fn into_set_name(self) -> Option<(NodeSetNameRequest, NodeControlHandle)> {
14342        if let NodeRequest::SetName { payload, control_handle } = self {
14343            Some((payload, control_handle))
14344        } else {
14345            None
14346        }
14347    }
14348
14349    #[allow(irrefutable_let_patterns)]
14350    pub fn into_set_debug_client_info(
14351        self,
14352    ) -> Option<(NodeSetDebugClientInfoRequest, NodeControlHandle)> {
14353        if let NodeRequest::SetDebugClientInfo { payload, control_handle } = self {
14354            Some((payload, control_handle))
14355        } else {
14356            None
14357        }
14358    }
14359
14360    #[allow(irrefutable_let_patterns)]
14361    pub fn into_set_debug_timeout_log_deadline(
14362        self,
14363    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, NodeControlHandle)> {
14364        if let NodeRequest::SetDebugTimeoutLogDeadline { payload, control_handle } = self {
14365            Some((payload, control_handle))
14366        } else {
14367            None
14368        }
14369    }
14370
14371    #[allow(irrefutable_let_patterns)]
14372    pub fn into_set_verbose_logging(self) -> Option<(NodeControlHandle)> {
14373        if let NodeRequest::SetVerboseLogging { control_handle } = self {
14374            Some((control_handle))
14375        } else {
14376            None
14377        }
14378    }
14379
14380    #[allow(irrefutable_let_patterns)]
14381    pub fn into_get_node_ref(self) -> Option<(NodeGetNodeRefResponder)> {
14382        if let NodeRequest::GetNodeRef { responder } = self { Some((responder)) } else { None }
14383    }
14384
14385    #[allow(irrefutable_let_patterns)]
14386    pub fn into_is_alternate_for(
14387        self,
14388    ) -> Option<(NodeIsAlternateForRequest, NodeIsAlternateForResponder)> {
14389        if let NodeRequest::IsAlternateFor { payload, responder } = self {
14390            Some((payload, responder))
14391        } else {
14392            None
14393        }
14394    }
14395
14396    #[allow(irrefutable_let_patterns)]
14397    pub fn into_get_buffer_collection_id(self) -> Option<(NodeGetBufferCollectionIdResponder)> {
14398        if let NodeRequest::GetBufferCollectionId { responder } = self {
14399            Some((responder))
14400        } else {
14401            None
14402        }
14403    }
14404
14405    #[allow(irrefutable_let_patterns)]
14406    pub fn into_set_weak(self) -> Option<(NodeControlHandle)> {
14407        if let NodeRequest::SetWeak { control_handle } = self {
14408            Some((control_handle))
14409        } else {
14410            None
14411        }
14412    }
14413
14414    #[allow(irrefutable_let_patterns)]
14415    pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, NodeControlHandle)> {
14416        if let NodeRequest::SetWeakOk { payload, control_handle } = self {
14417            Some((payload, control_handle))
14418        } else {
14419            None
14420        }
14421    }
14422
14423    #[allow(irrefutable_let_patterns)]
14424    pub fn into_attach_node_tracking(
14425        self,
14426    ) -> Option<(NodeAttachNodeTrackingRequest, NodeControlHandle)> {
14427        if let NodeRequest::AttachNodeTracking { payload, control_handle } = self {
14428            Some((payload, control_handle))
14429        } else {
14430            None
14431        }
14432    }
14433
14434    /// Name of the method defined in FIDL
14435    pub fn method_name(&self) -> &'static str {
14436        match *self {
14437            NodeRequest::Sync { .. } => "sync",
14438            NodeRequest::Release { .. } => "release",
14439            NodeRequest::SetName { .. } => "set_name",
14440            NodeRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
14441            NodeRequest::SetDebugTimeoutLogDeadline { .. } => "set_debug_timeout_log_deadline",
14442            NodeRequest::SetVerboseLogging { .. } => "set_verbose_logging",
14443            NodeRequest::GetNodeRef { .. } => "get_node_ref",
14444            NodeRequest::IsAlternateFor { .. } => "is_alternate_for",
14445            NodeRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
14446            NodeRequest::SetWeak { .. } => "set_weak",
14447            NodeRequest::SetWeakOk { .. } => "set_weak_ok",
14448            NodeRequest::AttachNodeTracking { .. } => "attach_node_tracking",
14449            NodeRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
14450                "unknown one-way method"
14451            }
14452            NodeRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
14453                "unknown two-way method"
14454            }
14455        }
14456    }
14457}
14458
/// Control handle for the server side of a `Node` connection; clones share
/// the same underlying channel state via `Arc`.
#[derive(Debug, Clone)]
pub struct NodeControlHandle {
    // Shared serve state (channel + dialect) used for shutdown and signaling.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
14463
impl fidl::endpoints::ControlHandle for NodeControlHandle {
    /// Shuts down the underlying server channel.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    /// Shuts down the channel, sending `status` as the epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    /// Reports whether the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    /// Returns a waiter that resolves when the channel is closed.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    /// Clears/sets zircon signals on the peer end of the channel
    /// (Fuchsia-only).
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
14489
14490impl NodeControlHandle {}
14491
/// Responder for the two-way `Node.Sync` method; holds what is needed to
/// route the reply back to the caller's transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeSyncResponder {
    // Wrapped in ManuallyDrop so Drop/drop_without_shutdown can control
    // exactly when the handle is released.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
14498
/// Set the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14509
impl fidl::endpoints::Responder for NodeSyncResponder {
    type ControlHandle = NodeControlHandle;

    /// Borrows the control handle of the connection this responder belongs to.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without shutting down the
    /// channel (unlike `Drop`, which shuts the channel down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14524
14525impl NodeSyncResponder {
14526    /// Sends a response to the FIDL transaction.
14527    ///
14528    /// Sets the channel to shutdown if an error occurs.
14529    pub fn send(self) -> Result<(), fidl::Error> {
14530        let _result = self.send_raw();
14531        if _result.is_err() {
14532            self.control_handle.shutdown();
14533        }
14534        self.drop_without_shutdown();
14535        _result
14536    }
14537
14538    /// Similar to "send" but does not shutdown the channel if an error occurs.
14539    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
14540        let _result = self.send_raw();
14541        self.drop_without_shutdown();
14542        _result
14543    }
14544
14545    fn send_raw(&self) -> Result<(), fidl::Error> {
14546        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
14547            fidl::encoding::Flexible::new(()),
14548            self.tx_id,
14549            0x11ac2555cf575b54,
14550            fidl::encoding::DynamicFlags::FLEXIBLE,
14551        )
14552    }
14553}
14554
/// Responder for the two-way `Node.GetNodeRef` method; holds what is needed
/// to route the reply back to the caller's transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeGetNodeRefResponder {
    // Wrapped in ManuallyDrop so Drop/drop_without_shutdown can control
    // exactly when the handle is released.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
14561
/// Set the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14572
impl fidl::endpoints::Responder for NodeGetNodeRefResponder {
    type ControlHandle = NodeControlHandle;

    /// Borrows the control handle of the connection this responder belongs to.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without shutting down the
    /// channel (unlike `Drop`, which shuts the channel down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14587
14588impl NodeGetNodeRefResponder {
14589    /// Sends a response to the FIDL transaction.
14590    ///
14591    /// Sets the channel to shutdown if an error occurs.
14592    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
14593        let _result = self.send_raw(payload);
14594        if _result.is_err() {
14595            self.control_handle.shutdown();
14596        }
14597        self.drop_without_shutdown();
14598        _result
14599    }
14600
14601    /// Similar to "send" but does not shutdown the channel if an error occurs.
14602    pub fn send_no_shutdown_on_err(
14603        self,
14604        mut payload: NodeGetNodeRefResponse,
14605    ) -> Result<(), fidl::Error> {
14606        let _result = self.send_raw(payload);
14607        self.drop_without_shutdown();
14608        _result
14609    }
14610
14611    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
14612        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
14613            fidl::encoding::Flexible::new(&mut payload),
14614            self.tx_id,
14615            0x5b3d0e51614df053,
14616            fidl::encoding::DynamicFlags::FLEXIBLE,
14617        )
14618    }
14619}
14620
/// Responder for the two-way `Node.IsAlternateFor` method; holds what is
/// needed to route the reply back to the caller's transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeIsAlternateForResponder {
    // Wrapped in ManuallyDrop so Drop/drop_without_shutdown can control
    // exactly when the handle is released.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
14627
/// Set the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14638
impl fidl::endpoints::Responder for NodeIsAlternateForResponder {
    type ControlHandle = NodeControlHandle;

    /// Borrows the control handle of the connection this responder belongs to.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without shutting down the
    /// channel (unlike `Drop`, which shuts the channel down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14653
14654impl NodeIsAlternateForResponder {
14655    /// Sends a response to the FIDL transaction.
14656    ///
14657    /// Sets the channel to shutdown if an error occurs.
14658    pub fn send(
14659        self,
14660        mut result: Result<&NodeIsAlternateForResponse, Error>,
14661    ) -> Result<(), fidl::Error> {
14662        let _result = self.send_raw(result);
14663        if _result.is_err() {
14664            self.control_handle.shutdown();
14665        }
14666        self.drop_without_shutdown();
14667        _result
14668    }
14669
14670    /// Similar to "send" but does not shutdown the channel if an error occurs.
14671    pub fn send_no_shutdown_on_err(
14672        self,
14673        mut result: Result<&NodeIsAlternateForResponse, Error>,
14674    ) -> Result<(), fidl::Error> {
14675        let _result = self.send_raw(result);
14676        self.drop_without_shutdown();
14677        _result
14678    }
14679
14680    fn send_raw(
14681        &self,
14682        mut result: Result<&NodeIsAlternateForResponse, Error>,
14683    ) -> Result<(), fidl::Error> {
14684        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
14685            NodeIsAlternateForResponse,
14686            Error,
14687        >>(
14688            fidl::encoding::FlexibleResult::new(result),
14689            self.tx_id,
14690            0x3a58e00157e0825,
14691            fidl::encoding::DynamicFlags::FLEXIBLE,
14692        )
14693    }
14694}
14695
/// Responder for the two-way `Node.GetBufferCollectionId` method; holds what
/// is needed to route the reply back to the caller's transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeGetBufferCollectionIdResponder {
    // Wrapped in ManuallyDrop so Drop/drop_without_shutdown can control
    // exactly when the handle is released.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
14702
/// Set the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14713
impl fidl::endpoints::Responder for NodeGetBufferCollectionIdResponder {
    type ControlHandle = NodeControlHandle;

    /// Borrows the control handle of the connection this responder belongs to.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without shutting down the
    /// channel (unlike `Drop`, which shuts the channel down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14728
14729impl NodeGetBufferCollectionIdResponder {
14730    /// Sends a response to the FIDL transaction.
14731    ///
14732    /// Sets the channel to shutdown if an error occurs.
14733    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
14734        let _result = self.send_raw(payload);
14735        if _result.is_err() {
14736            self.control_handle.shutdown();
14737        }
14738        self.drop_without_shutdown();
14739        _result
14740    }
14741
14742    /// Similar to "send" but does not shutdown the channel if an error occurs.
14743    pub fn send_no_shutdown_on_err(
14744        self,
14745        mut payload: &NodeGetBufferCollectionIdResponse,
14746    ) -> Result<(), fidl::Error> {
14747        let _result = self.send_raw(payload);
14748        self.drop_without_shutdown();
14749        _result
14750    }
14751
14752    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
14753        self.control_handle
14754            .inner
14755            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
14756                fidl::encoding::Flexible::new(payload),
14757                self.tx_id,
14758                0x77d19a494b78ba8c,
14759                fidl::encoding::DynamicFlags::FLEXIBLE,
14760            )
14761    }
14762}
14763
/// Zero-sized marker type identifying the `SecureMem` protocol for the FIDL
/// endpoint machinery (see the `ProtocolMarker` impl below).
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct SecureMemMarker;
14766
impl fidl::endpoints::ProtocolMarker for SecureMemMarker {
    type Proxy = SecureMemProxy;
    type RequestStream = SecureMemRequestStream;
    // The synchronous proxy only exists on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = SecureMemSynchronousProxy;

    // Name used in debug/error output; "(anonymous)" indicates the protocol
    // has no discoverable service name.
    const DEBUG_NAME: &'static str = "(anonymous) SecureMem";
}
// Result aliases for the `SecureMem` two-way methods; `Error` is the sysmem2
// domain error re-exported from the common crate.
pub type SecureMemGetPhysicalSecureHeapsResult =
    Result<SecureMemGetPhysicalSecureHeapsResponse, Error>;
pub type SecureMemGetDynamicSecureHeapsResult =
    Result<SecureMemGetDynamicSecureHeapsResponse, Error>;
pub type SecureMemGetPhysicalSecureHeapPropertiesResult =
    Result<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>;
pub type SecureMemAddSecureHeapPhysicalRangeResult = Result<(), Error>;
pub type SecureMemDeleteSecureHeapPhysicalRangeResult = Result<(), Error>;
pub type SecureMemModifySecureHeapPhysicalRangeResult = Result<(), Error>;
pub type SecureMemZeroSubRangeResult = Result<(), Error>;
14785
/// Client-side interface for the `SecureMem` protocol: each two-way method
/// pairs an `r#…` call with an associated `Send` future type yielding the
/// method's result (or a `fidl::Error` transport failure).
pub trait SecureMemProxyInterface: Send + Sync {
    type GetPhysicalSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error>>
        + Send;
    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut;
    type GetDynamicSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error>>
        + Send;
    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut;
    type GetPhysicalSecureHeapPropertiesResponseFut: std::future::Future<
            Output = Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error>,
        > + Send;
    fn r#get_physical_secure_heap_properties(
        &self,
        payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut;
    type AddSecureHeapPhysicalRangeResponseFut: std::future::Future<Output = Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error>>
        + Send;
    fn r#add_secure_heap_physical_range(
        &self,
        payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> Self::AddSecureHeapPhysicalRangeResponseFut;
    type DeleteSecureHeapPhysicalRangeResponseFut: std::future::Future<
            Output = Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error>,
        > + Send;
    fn r#delete_secure_heap_physical_range(
        &self,
        payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut;
    type ModifySecureHeapPhysicalRangeResponseFut: std::future::Future<
            Output = Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error>,
        > + Send;
    fn r#modify_secure_heap_physical_range(
        &self,
        payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut;
    type ZeroSubRangeResponseFut: std::future::Future<Output = Result<SecureMemZeroSubRangeResult, fidl::Error>>
        + Send;
    fn r#zero_sub_range(
        &self,
        payload: &SecureMemZeroSubRangeRequest,
    ) -> Self::ZeroSubRangeResponseFut;
}
/// Blocking (synchronous) client proxy for the `SecureMem` protocol; only
/// compiled for Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct SecureMemSynchronousProxy {
    // Synchronous FIDL client that owns the channel to the server.
    client: fidl::client::sync::Client,
}
14832
14833#[cfg(target_os = "fuchsia")]
14834impl fidl::endpoints::SynchronousProxy for SecureMemSynchronousProxy {
14835    type Proxy = SecureMemProxy;
14836    type Protocol = SecureMemMarker;
14837
14838    fn from_channel(inner: fidl::Channel) -> Self {
14839        Self::new(inner)
14840    }
14841
14842    fn into_channel(self) -> fidl::Channel {
14843        self.client.into_channel()
14844    }
14845
14846    fn as_channel(&self) -> &fidl::Channel {
14847        self.client.as_channel()
14848    }
14849}
14850
14851#[cfg(target_os = "fuchsia")]
14852impl SecureMemSynchronousProxy {
14853    pub fn new(channel: fidl::Channel) -> Self {
14854        let protocol_name = <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
14855        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
14856    }
14857
14858    pub fn into_channel(self) -> fidl::Channel {
14859        self.client.into_channel()
14860    }
14861
14862    /// Waits until an event arrives and returns it. It is safe for other
14863    /// threads to make concurrent requests while waiting for an event.
14864    pub fn wait_for_event(
14865        &self,
14866        deadline: zx::MonotonicInstant,
14867    ) -> Result<SecureMemEvent, fidl::Error> {
14868        SecureMemEvent::decode(self.client.wait_for_event(deadline)?)
14869    }
14870
14871    /// Gets the physical address and length of any secure heap whose physical
14872    /// range is configured via the TEE.
14873    ///
14874    /// Presently, these will be fixed physical addresses and lengths, with the
14875    /// location plumbed via the TEE.
14876    ///
14877    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
14878    /// when there isn't any special heap-specific per-VMO setup or teardown
14879    /// required.
14880    ///
14881    /// The physical range must be secured/protected by the TEE before the
14882    /// securemem driver responds to this request with success.
14883    ///
14884    /// Sysmem should only call this once.  Returning zero heaps is not a
14885    /// failure.
14886    ///
14887    /// Errors:
14888    ///  * PROTOCOL_DEVIATION - called more than once.
14889    ///  * UNSPECIFIED - generic internal error (such as in communication
14890    ///    with TEE which doesn't generate zx_status_t errors).
14891    ///  * other errors are allowed; any other errors should be treated the same
14892    ///    as UNSPECIFIED.
14893    pub fn r#get_physical_secure_heaps(
14894        &self,
14895        ___deadline: zx::MonotonicInstant,
14896    ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
14897        let _response = self.client.send_query::<
14898            fidl::encoding::EmptyPayload,
14899            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
14900        >(
14901            (),
14902            0x38716300592073e3,
14903            fidl::encoding::DynamicFlags::FLEXIBLE,
14904            ___deadline,
14905        )?
14906        .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
14907        Ok(_response.map(|x| x))
14908    }
14909
14910    /// Gets information about any secure heaps whose physical pages are not
14911    /// configured by the TEE, but by sysmem.
14912    ///
14913    /// Sysmem should only call this once. Returning zero heaps is not a
14914    /// failure.
14915    ///
14916    /// Errors:
14917    ///  * PROTOCOL_DEVIATION - called more than once.
14918    ///  * UNSPECIFIED - generic internal error (such as in communication
14919    ///    with TEE which doesn't generate zx_status_t errors).
14920    ///  * other errors are allowed; any other errors should be treated the same
14921    ///    as UNSPECIFIED.
14922    pub fn r#get_dynamic_secure_heaps(
14923        &self,
14924        ___deadline: zx::MonotonicInstant,
14925    ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
14926        let _response = self.client.send_query::<
14927            fidl::encoding::EmptyPayload,
14928            fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
14929        >(
14930            (),
14931            0x1190847f99952834,
14932            fidl::encoding::DynamicFlags::FLEXIBLE,
14933            ___deadline,
14934        )?
14935        .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
14936        Ok(_response.map(|x| x))
14937    }
14938
14939    /// This request from sysmem to the securemem driver gets the properties of
14940    /// a protected/secure heap.
14941    ///
14942    /// This only handles heaps with a single contiguous physical extent.
14943    ///
14944    /// The heap's entire physical range is indicated in case this request needs
14945    /// some physical space to auto-detect how many ranges are REE-usable.  Any
14946    /// temporary HW protection ranges will be deleted before this request
14947    /// completes.
14948    ///
14949    /// Errors:
14950    ///  * UNSPECIFIED - generic internal error (such as in communication
14951    ///    with TEE which doesn't generate zx_status_t errors).
14952    ///  * other errors are allowed; any other errors should be treated the same
14953    ///    as UNSPECIFIED.
14954    pub fn r#get_physical_secure_heap_properties(
14955        &self,
14956        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
14957        ___deadline: zx::MonotonicInstant,
14958    ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
14959        let _response = self.client.send_query::<
14960            SecureMemGetPhysicalSecureHeapPropertiesRequest,
14961            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
14962        >(
14963            payload,
14964            0xc6f06889009c7bc,
14965            fidl::encoding::DynamicFlags::FLEXIBLE,
14966            ___deadline,
14967        )?
14968        .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
14969        Ok(_response.map(|x| x))
14970    }
14971
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to add, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as protected
    /// before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// created.
    ///
    /// Sysmem must only call this up to once if dynamic_protection_ranges
    /// false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this multiple
    /// times as long as the current number of ranges never exceeds
    /// max_protected_range_count.
    ///
    /// The caller must not attempt to add a range that matches an
    /// already-existing range.  Added ranges can overlap each other as long as
    /// no two ranges match exactly.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called more than once when
    ///     !dynamic_protection_ranges.  Adding a heap that would cause overall
    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
    ///     range that doesn't conform to protected_range_granularity. See log.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#add_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
        // Blocking two-way call: waits for the reply or until `___deadline`.
        // 0x35f695b9b6c7217a is this method's FIDL ordinal; the FLEXIBLE
        // dynamic flag marks this as a flexible interaction, so an
        // unknown-method reply is surfaced by `into_result` as a fidl::Error
        // rather than a channel close.
        let _response = self.client.send_query::<
            SecureMemAddSecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0x35f695b9b6c7217a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
        // Generated identity conversion of the success/domain-error result.
        Ok(_response.map(|x| x))
    }
15023
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to delete, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as not
    /// protected before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// deleted.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// If any portion of the range being deleted is not also covered by another
    /// protected range, then any ongoing DMA to any part of the entire range
    /// may be interrupted / may fail, potentially in a way that's disruptive to
    /// the entire system (bus lockup or similar, depending on device details).
    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
    /// any portion of the range being deleted, unless the caller has other
    /// active ranges covering every block of the range being deleted.  Ongoing
    /// DMA to/from blocks outside the range being deleted is never impacted by
    /// the deletion.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or range that doesn't conform to
    ///     protected_range_granularity.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#delete_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
        // Blocking two-way call (ordinal 0xeaa58c650264c9e); FLEXIBLE lets
        // `into_result` translate an unknown-method reply into a fidl::Error.
        let _response = self.client.send_query::<
            SecureMemDeleteSecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
        // Generated identity conversion of the success/domain-error result.
        Ok(_response.map(|x| x))
    }
15079
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to modify and its new base and length, for a heap whose physical
    /// range(s) are set up via sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure the range to cover only the new
    /// offsets before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the range was not changed.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.  Sysmem
    /// must not call this if !is_mod_protected_range_available.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// The range must only be modified at one end or the other, but not both.
    /// If the range is getting shorter, and the un-covered blocks are not
    /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's getting shorter may fail in a way that disrupts the entire system
    /// (bus lockup or similar), so the caller must ensure that no DMA is
    /// ongoing to any portion of a range that is getting shorter, unless the
    /// blocks being un-covered by the modification to this range are all
    /// covered by other active ranges, in which case no disruption to ongoing
    /// DMA will occur.
    ///
    /// If a range is modified to become <= zero length, the range is deleted.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or old_range or new_range that doesn't conform to
    ///     protected_range_granularity, or old_range and new_range differ in
    ///     both begin and end (disallowed).
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#modify_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
        // Blocking two-way call (ordinal 0x60b7448aa1187734); FLEXIBLE lets
        // `into_result` translate an unknown-method reply into a fidl::Error.
        let _response = self.client.send_query::<
            SecureMemModifySecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
        // Generated identity conversion of the success/domain-error result.
        Ok(_response.map(|x| x))
    }
15138
15139    /// Zero a sub-range of a currently-existing physical range added via
15140    /// AddSecureHeapPhysicalRange().  The sub-range must be fully covered by
15141    /// exactly one physical range, and must not overlap with any other
15142    /// physical range.
15143    ///
15144    /// is_covering_range_explicit - When true, the covering range must be one
15145    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
15146    ///     possibly modified since.  When false, the covering range must not
15147    ///     be one of the ranges explicitly created via
15148    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
15149    ///     a covering range not created via AddSecureHeapPhysicalRange().  The
15150    ///     covering range is typically the entire physical range (or a range
15151    ///     which covers even more) of a heap configured by the TEE and whose
15152    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
15153    ///
15154    /// Ongoing DMA is not disrupted by this request.
15155    ///
15156    /// Errors:
15157    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15158    ///     Unexpected heap.
15159    ///   * UNSPECIFIED - generic internal error (such as in communication
15160    ///     with TEE which doesn't generate zx_status_t errors).
15161    ///   * other errors are possible, such as from communication failures or
15162    ///     server propagation of failures.
15163    pub fn r#zero_sub_range(
15164        &self,
15165        mut payload: &SecureMemZeroSubRangeRequest,
15166        ___deadline: zx::MonotonicInstant,
15167    ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
15168        let _response = self.client.send_query::<
15169            SecureMemZeroSubRangeRequest,
15170            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
15171        >(
15172            payload,
15173            0x5b25b7901a385ce5,
15174            fidl::encoding::DynamicFlags::FLEXIBLE,
15175            ___deadline,
15176        )?
15177        .into_result::<SecureMemMarker>("zero_sub_range")?;
15178        Ok(_response.map(|x| x))
15179    }
15180}
15181
#[cfg(target_os = "fuchsia")]
impl From<SecureMemSynchronousProxy> for zx::Handle {
    /// Consumes the synchronous proxy and yields its underlying channel as a
    /// generic kernel handle.
    fn from(value: SecureMemSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
15188
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for SecureMemSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy for
    /// `fuchsia.sysmem2/SecureMem`.
    fn from(value: fidl::Channel) -> Self {
        SecureMemSynchronousProxy::new(value)
    }
}
15195
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for SecureMemSynchronousProxy {
    type Protocol = SecureMemMarker;

    /// Builds a synchronous proxy from a typed client endpoint by taking
    /// ownership of its underlying channel.
    fn from_client(value: fidl::endpoints::ClientEnd<SecureMemMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
15204
/// Asynchronous client proxy for the `fuchsia.sysmem2/SecureMem` protocol.
#[derive(Debug, Clone)]
pub struct SecureMemProxy {
    // Underlying FIDL client (default Fuchsia resource dialect) that owns the
    // async channel used for all method calls and events.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
15209
15210impl fidl::endpoints::Proxy for SecureMemProxy {
15211    type Protocol = SecureMemMarker;
15212
15213    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
15214        Self::new(inner)
15215    }
15216
15217    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
15218        self.client.into_channel().map_err(|client| Self { client })
15219    }
15220
15221    fn as_channel(&self) -> &::fidl::AsyncChannel {
15222        self.client.as_channel()
15223    }
15224}
15225
// Public async API surface; each method delegates to the
// `SecureMemProxyInterface` impl, which performs the actual FIDL
// encode/send/decode work.
impl SecureMemProxy {
    /// Create a new Proxy for fuchsia.sysmem2/SecureMem.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> SecureMemEventStream {
        SecureMemEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Gets the physical address and length of any secure heap whose physical
    /// range is configured via the TEE.
    ///
    /// Presently, these will be fixed physical addresses and lengths, with the
    /// location plumbed via the TEE.
    ///
    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
    /// when there isn't any special heap-specific per-VMO setup or teardown
    /// required.
    ///
    /// The physical range must be secured/protected by the TEE before the
    /// securemem driver responds to this request with success.
    ///
    /// Sysmem should only call this once.  Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    pub fn r#get_physical_secure_heaps(
        &self,
    ) -> fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegation only; the returned future resolves when the reply arrives.
        SecureMemProxyInterface::r#get_physical_secure_heaps(self)
    }

    /// Gets information about any secure heaps whose physical pages are not
    /// configured by the TEE, but by sysmem.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    pub fn r#get_dynamic_secure_heaps(
        &self,
    ) -> fidl::client::QueryResponseFut<
        SecureMemGetDynamicSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegation only; the returned future resolves when the reply arrives.
        SecureMemProxyInterface::r#get_dynamic_secure_heaps(self)
    }

    /// This request from sysmem to the securemem driver gets the properties of
    /// a protected/secure heap.
    ///
    /// This only handles heaps with a single contiguous physical extent.
    ///
    /// The heap's entire physical range is indicated in case this request needs
    /// some physical space to auto-detect how many ranges are REE-usable.  Any
    /// temporary HW protection ranges will be deleted before this request
    /// completes.
    ///
    /// Errors:
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    pub fn r#get_physical_secure_heap_properties(
        &self,
        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapPropertiesResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegation only; the returned future resolves when the reply arrives.
        SecureMemProxyInterface::r#get_physical_secure_heap_properties(self, payload)
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to add, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as protected
    /// before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// created.
    ///
    /// Sysmem must only call this up to once if dynamic_protection_ranges
    /// false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this multiple
    /// times as long as the current number of ranges never exceeds
    /// max_protected_range_count.
    ///
    /// The caller must not attempt to add a range that matches an
    /// already-existing range.  Added ranges can overlap each other as long as
    /// no two ranges match exactly.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called more than once when
    ///     !dynamic_protection_ranges.  Adding a heap that would cause overall
    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
    ///     range that doesn't conform to protected_range_granularity. See log.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#add_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemAddSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegation only; the returned future resolves when the reply arrives.
        SecureMemProxyInterface::r#add_secure_heap_physical_range(self, payload)
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to delete, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as not
    /// protected before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// deleted.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// If any portion of the range being deleted is not also covered by another
    /// protected range, then any ongoing DMA to any part of the entire range
    /// may be interrupted / may fail, potentially in a way that's disruptive to
    /// the entire system (bus lockup or similar, depending on device details).
    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
    /// any portion of the range being deleted, unless the caller has other
    /// active ranges covering every block of the range being deleted.  Ongoing
    /// DMA to/from blocks outside the range being deleted is never impacted by
    /// the deletion.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or range that doesn't conform to
    ///     protected_range_granularity.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#delete_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemDeleteSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegation only; the returned future resolves when the reply arrives.
        SecureMemProxyInterface::r#delete_secure_heap_physical_range(self, payload)
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to modify and its new base and length, for a heap whose physical
    /// range(s) are set up via sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure the range to cover only the new
    /// offsets before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the range was not changed.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.  Sysmem
    /// must not call this if !is_mod_protected_range_available.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// The range must only be modified at one end or the other, but not both.
    /// If the range is getting shorter, and the un-covered blocks are not
    /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's getting shorter may fail in a way that disrupts the entire system
    /// (bus lockup or similar), so the caller must ensure that no DMA is
    /// ongoing to any portion of a range that is getting shorter, unless the
    /// blocks being un-covered by the modification to this range are all
    /// covered by other active ranges, in which case no disruption to ongoing
    /// DMA will occur.
    ///
    /// If a range is modified to become <= zero length, the range is deleted.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or old_range or new_range that doesn't conform to
    ///     protected_range_granularity, or old_range and new_range differ in
    ///     both begin and end (disallowed).
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#modify_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemModifySecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegation only; the returned future resolves when the reply arrives.
        SecureMemProxyInterface::r#modify_secure_heap_physical_range(self, payload)
    }

    /// Zero a sub-range of a currently-existing physical range added via
    /// AddSecureHeapPhysicalRange().  The sub-range must be fully covered by
    /// exactly one physical range, and must not overlap with any other
    /// physical range.
    ///
    /// is_covering_range_explicit - When true, the covering range must be one
    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
    ///     possibly modified since.  When false, the covering range must not
    ///     be one of the ranges explicitly created via
    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
    ///     a covering range not created via AddSecureHeapPhysicalRange().  The
    ///     covering range is typically the entire physical range (or a range
    ///     which covers even more) of a heap configured by the TEE and whose
    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
    ///
    /// Ongoing DMA is not disrupted by this request.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#zero_sub_range(
        &self,
        mut payload: &SecureMemZeroSubRangeRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemZeroSubRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegation only; the returned future resolves when the reply arrives.
        SecureMemProxyInterface::r#zero_sub_range(self, payload)
    }
}
15496
15497impl SecureMemProxyInterface for SecureMemProxy {
    type GetPhysicalSecureHeapsResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut {
        // Decodes the reply for ordinal 0x38716300592073e3 and unwraps the
        // flexible envelope (an unknown-method reply becomes a fidl::Error).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x38716300592073e3,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
            // Generated identity conversion of the success/domain-error result.
            Ok(_response.map(|x| x))
        }
        // Sends an empty request payload; `_decode` runs when the reply lands.
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            SecureMemGetPhysicalSecureHeapsResult,
        >(
            (),
            0x38716300592073e3,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
15524
    type GetDynamicSecureHeapsResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetDynamicSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut {
        // Decodes the reply for ordinal 0x1190847f99952834 and unwraps the
        // flexible envelope (an unknown-method reply becomes a fidl::Error).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x1190847f99952834,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
            // Generated identity conversion of the success/domain-error result.
            Ok(_response.map(|x| x))
        }
        // Sends an empty request payload; `_decode` runs when the reply lands.
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            SecureMemGetDynamicSecureHeapsResult,
        >(
            (),
            0x1190847f99952834,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
15551
    type GetPhysicalSecureHeapPropertiesResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapPropertiesResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_physical_secure_heap_properties(
        &self,
        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut {
        // Decodes the reply for ordinal 0xc6f06889009c7bc and unwraps the
        // flexible envelope (an unknown-method reply becomes a fidl::Error).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<
                    SecureMemGetPhysicalSecureHeapPropertiesResponse,
                    Error,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0xc6f06889009c7bc,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
            // Generated identity conversion of the success/domain-error result.
            Ok(_response.map(|x| x))
        }
        // Encodes and sends `payload`; `_decode` runs when the reply lands.
        self.client.send_query_and_decode::<
            SecureMemGetPhysicalSecureHeapPropertiesRequest,
            SecureMemGetPhysicalSecureHeapPropertiesResult,
        >(
            payload,
            0xc6f06889009c7bc,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
15584
    type AddSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemAddSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#add_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> Self::AddSecureHeapPhysicalRangeResponseFut {
        // Decodes the reply for ordinal 0x35f695b9b6c7217a and unwraps the
        // flexible envelope (an unknown-method reply becomes a fidl::Error).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x35f695b9b6c7217a,
            >(_buf?)?
            .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
            // Generated identity conversion of the success/domain-error result.
            Ok(_response.map(|x| x))
        }
        // Encodes and sends `payload`; `_decode` runs when the reply lands.
        self.client.send_query_and_decode::<
            SecureMemAddSecureHeapPhysicalRangeRequest,
            SecureMemAddSecureHeapPhysicalRangeResult,
        >(
            payload,
            0x35f695b9b6c7217a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
15614
    type DeleteSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemDeleteSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#delete_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut {
        // Decodes the reply for ordinal 0xeaa58c650264c9e and unwraps the
        // flexible envelope (an unknown-method reply becomes a fidl::Error).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0xeaa58c650264c9e,
            >(_buf?)?
            .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
            // Generated identity conversion of the success/domain-error result.
            Ok(_response.map(|x| x))
        }
        // Encodes and sends `payload`; `_decode` runs when the reply lands.
        self.client.send_query_and_decode::<
            SecureMemDeleteSecureHeapPhysicalRangeRequest,
            SecureMemDeleteSecureHeapPhysicalRangeResult,
        >(
            payload,
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
15644
    type ModifySecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemModifySecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Sends the flexible two-way ModifySecureHeapPhysicalRange request and
    // returns a future resolving to the decoded result.
    fn r#modify_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut {
        // Decodes the response body; the ordinal here must match the one
        // passed to `send_query_and_decode` below.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x60b7448aa1187734,
            >(_buf?)?
            .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemModifySecureHeapPhysicalRangeRequest,
            SecureMemModifySecureHeapPhysicalRangeResult,
        >(
            payload,
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
15674
    type ZeroSubRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemZeroSubRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Sends the flexible two-way ZeroSubRange request and returns a future
    // resolving to the decoded result.
    fn r#zero_sub_range(
        &self,
        mut payload: &SecureMemZeroSubRangeRequest,
    ) -> Self::ZeroSubRangeResponseFut {
        // Decodes the response body; the ordinal here must match the one
        // passed to `send_query_and_decode` below.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b25b7901a385ce5,
            >(_buf?)?
            .into_result::<SecureMemMarker>("zero_sub_range")?;
            Ok(_response.map(|x| x))
        }
        self.client
            .send_query_and_decode::<SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResult>(
                payload,
                0x5b25b7901a385ce5,
                fidl::encoding::DynamicFlags::FLEXIBLE,
                _decode,
            )
    }
15702}
15703
/// A Stream of incoming events for fuchsia.sysmem2/SecureMem.
pub struct SecureMemEventStream {
    // Receives raw message buffers from the channel; decoded in `poll_next`.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
15707
15708impl std::marker::Unpin for SecureMemEventStream {}
15709
impl futures::stream::FusedStream for SecureMemEventStream {
    // Terminated exactly when the underlying event receiver is.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
15715
impl futures::Stream for SecureMemEventStream {
    type Item = Result<SecureMemEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // Forward the poll to the raw receiver; `ready!` propagates Pending
        // and `?` propagates receiver errors as a stream item.
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            // A raw buffer arrived: decode it into a typed event.
            Some(buf) => std::task::Poll::Ready(Some(SecureMemEvent::decode(buf))),
            // Receiver exhausted: end of the event stream.
            None => std::task::Poll::Ready(None),
        }
    }
}
15732
/// Events for fuchsia.sysmem2/SecureMem.  This protocol declares no events,
/// so only unknown flexible events can be observed.
#[derive(Debug)]
pub enum SecureMemEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
15741
15742impl SecureMemEvent {
15743    /// Decodes a message buffer as a [`SecureMemEvent`].
15744    fn decode(
15745        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
15746    ) -> Result<SecureMemEvent, fidl::Error> {
15747        let (bytes, _handles) = buf.split_mut();
15748        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
15749        debug_assert_eq!(tx_header.tx_id, 0);
15750        match tx_header.ordinal {
15751            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
15752                Ok(SecureMemEvent::_UnknownEvent { ordinal: tx_header.ordinal })
15753            }
15754            _ => Err(fidl::Error::UnknownOrdinal {
15755                ordinal: tx_header.ordinal,
15756                protocol_name: <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
15757            }),
15758        }
15759    }
15760}
15761
/// A Stream of incoming requests for fuchsia.sysmem2/SecureMem.
pub struct SecureMemRequestStream {
    // Shared server state; also held by every control handle cloned from it.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel is shut down or the peer closes; polling after
    // this is a caller bug (see `poll_next`).
    is_terminated: bool,
}
15767
15768impl std::marker::Unpin for SecureMemRequestStream {}
15769
15770impl futures::stream::FusedStream for SecureMemRequestStream {
15771    fn is_terminated(&self) -> bool {
15772        self.is_terminated
15773    }
15774}
15775
impl fidl::endpoints::RequestStream for SecureMemRequestStream {
    type Protocol = SecureMemMarker;
    type ControlHandle = SecureMemControlHandle;

    // Wraps a freshly accepted channel in serving state.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    // Hands out a control handle sharing this stream's serving state.
    fn control_handle(&self) -> Self::ControlHandle {
        SecureMemControlHandle { inner: self.inner.clone() }
    }

    // Decomposes the stream into its shared state and termination flag,
    // the inverse of `from_inner`.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
15802
impl futures::Stream for SecureMemRequestStream {
    type Item = Result<SecureMemRequest, fidl::Error>;

    // Reads one message from the channel, decodes it by method ordinal, and
    // yields a typed `SecureMemRequest` with a responder for two-way calls.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled SecureMemRequestStream after completion");
        }
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure is the normal end of the stream, not an error.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch by method ordinal; each arm decodes the payload and
                // packages a responder carrying the request's transaction id.
                std::task::Poll::Ready(Some(match header.ordinal {
                    // SecureMem.GetPhysicalSecureHeaps (empty request payload)
                    0x38716300592073e3 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetPhysicalSecureHeaps {
                            responder: SecureMemGetPhysicalSecureHeapsResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SecureMem.GetDynamicSecureHeaps (empty request payload)
                    0x1190847f99952834 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetDynamicSecureHeaps {
                            responder: SecureMemGetDynamicSecureHeapsResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SecureMem.GetPhysicalSecureHeapProperties
                    0xc6f06889009c7bc => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemGetPhysicalSecureHeapPropertiesRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemGetPhysicalSecureHeapPropertiesRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetPhysicalSecureHeapProperties {
                            payload: req,
                            responder: SecureMemGetPhysicalSecureHeapPropertiesResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SecureMem.AddSecureHeapPhysicalRange
                    0x35f695b9b6c7217a => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemAddSecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemAddSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::AddSecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemAddSecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SecureMem.DeleteSecureHeapPhysicalRange
                    0xeaa58c650264c9e => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemDeleteSecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemDeleteSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::DeleteSecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemDeleteSecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SecureMem.ModifySecureHeapPhysicalRange
                    0x60b7448aa1187734 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemModifySecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemModifySecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::ModifySecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemModifySecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SecureMem.ZeroSubRange
                    0x5b25b7901a385ce5 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemZeroSubRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemZeroSubRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::ZeroSubRange {
                            payload: req,
                            responder: SecureMemZeroSubRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Unknown flexible one-way method (tx_id == 0): surface it
                    // to the server as `_UnknownMethod`.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(SecureMemRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with a framework
                    // error so the client unblocks, then surface it.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(SecureMemRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
15986
/// SecureMem
///
/// The client is sysmem.  The server is securemem driver.
///
/// TEE - Trusted Execution Environment.
///
/// REE - Rich Execution Environment.
///
/// Enables sysmem to call the securemem driver to get any secure heaps
/// configured via the TEE (or via the securemem driver), and set any physical
/// secure heaps configured via sysmem.
///
/// Presently, dynamically-allocated secure heaps are configured via sysmem, as
/// it starts quite early during boot and can successfully reserve contiguous
/// physical memory.  Presently, fixed-location secure heaps are configured via
/// TEE, as the plumbing goes from the bootloader to the TEE.  However, this
/// protocol intentionally doesn't care which heaps are dynamically-allocated
/// and which are fixed-location.
#[derive(Debug)]
pub enum SecureMemRequest {
    /// Gets the physical address and length of any secure heap whose physical
    /// range is configured via the TEE.
    ///
    /// Presently, these will be fixed physical addresses and lengths, with the
    /// location plumbed via the TEE.
    ///
    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
    /// when there isn't any special heap-specific per-VMO setup or teardown
    /// required.
    ///
    /// The physical range must be secured/protected by the TEE before the
    /// securemem driver responds to this request with success.
    ///
    /// Sysmem should only call this once.  Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    GetPhysicalSecureHeaps { responder: SecureMemGetPhysicalSecureHeapsResponder },
    /// Gets information about any secure heaps whose physical pages are not
    /// configured by the TEE, but by sysmem.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    GetDynamicSecureHeaps { responder: SecureMemGetDynamicSecureHeapsResponder },
    /// This request from sysmem to the securemem driver gets the properties of
    /// a protected/secure heap.
    ///
    /// This only handles heaps with a single contiguous physical extent.
    ///
    /// The heap's entire physical range is indicated in case this request needs
    /// some physical space to auto-detect how many ranges are REE-usable.  Any
    /// temporary HW protection ranges will be deleted before this request
    /// completes.
    ///
    /// Errors:
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    GetPhysicalSecureHeapProperties {
        payload: SecureMemGetPhysicalSecureHeapPropertiesRequest,
        responder: SecureMemGetPhysicalSecureHeapPropertiesResponder,
    },
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to add, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as protected
    /// before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// created.
    ///
    /// Sysmem must only call this up to once if dynamic_protection_ranges
    /// false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this multiple
    /// times as long as the current number of ranges never exceeds
    /// max_protected_range_count.
    ///
    /// The caller must not attempt to add a range that matches an
    /// already-existing range.  Added ranges can overlap each other as long as
    /// no two ranges match exactly.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called more than once when
    ///     !dynamic_protection_ranges.  Adding a heap that would cause overall
    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
    ///     range that doesn't conform to protected_range_granularity. See log.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    AddSecureHeapPhysicalRange {
        payload: SecureMemAddSecureHeapPhysicalRangeRequest,
        responder: SecureMemAddSecureHeapPhysicalRangeResponder,
    },
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to delete, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as not
    /// protected before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// deleted.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// If any portion of the range being deleted is not also covered by another
    /// protected range, then any ongoing DMA to any part of the entire range
    /// may be interrupted / may fail, potentially in a way that's disruptive to
    /// the entire system (bus lockup or similar, depending on device details).
    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
    /// any portion of the range being deleted, unless the caller has other
    /// active ranges covering every block of the range being deleted.  Ongoing
    /// DMA to/from blocks outside the range being deleted is never impacted by
    /// the deletion.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or range that doesn't conform to
    ///     protected_range_granularity.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    DeleteSecureHeapPhysicalRange {
        payload: SecureMemDeleteSecureHeapPhysicalRangeRequest,
        responder: SecureMemDeleteSecureHeapPhysicalRangeResponder,
    },
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to modify and its new base and length, for a heap whose physical
    /// range(s) are set up via sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure the range to cover only the new
    /// offsets before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the range was not changed.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.  Sysmem
    /// must not call this if !is_mod_protected_range_available.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// The range must only be modified at one end or the other, but not both.
    /// If the range is getting shorter, and the un-covered blocks are not
    /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's getting shorter may fail in a way that disrupts the entire system
    /// (bus lockup or similar), so the caller must ensure that no DMA is
    /// ongoing to any portion of a range that is getting shorter, unless the
    /// blocks being un-covered by the modification to this range are all
    /// covered by other active ranges, in which case no disruption to ongoing
    /// DMA will occur.
    ///
    /// If a range is modified to become <= zero length, the range is deleted.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or old_range or new_range that doesn't conform to
    ///     protected_range_granularity, or old_range and new_range differ in
    ///     both begin and end (disallowed).
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    ModifySecureHeapPhysicalRange {
        payload: SecureMemModifySecureHeapPhysicalRangeRequest,
        responder: SecureMemModifySecureHeapPhysicalRangeResponder,
    },
    /// Zero a sub-range of a currently-existing physical range added via
    /// AddSecureHeapPhysicalRange().  The sub-range must be fully covered by
    /// exactly one physical range, and must not overlap with any other
    /// physical range.
    ///
    /// is_covering_range_explicit - When true, the covering range must be one
    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
    ///     possibly modified since.  When false, the covering range must not
    ///     be one of the ranges explicitly created via
    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
    ///     a covering range not created via AddSecureHeapPhysicalRange().  The
    ///     covering range is typically the entire physical range (or a range
    ///     which covers even more) of a heap configured by the TEE and whose
    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
    ///
    /// Ongoing DMA is not disrupted by this request.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    ZeroSubRange {
        payload: SecureMemZeroSubRangeRequest,
        responder: SecureMemZeroSubRangeResponder,
    },
    /// An interaction was received which does not match any known method.
    #[non_exhaustive]
    _UnknownMethod {
        /// Ordinal of the method that was called.
        ordinal: u64,
        control_handle: SecureMemControlHandle,
        method_type: fidl::MethodType,
    },
}
16224
16225impl SecureMemRequest {
16226    #[allow(irrefutable_let_patterns)]
16227    pub fn into_get_physical_secure_heaps(
16228        self,
16229    ) -> Option<(SecureMemGetPhysicalSecureHeapsResponder)> {
16230        if let SecureMemRequest::GetPhysicalSecureHeaps { responder } = self {
16231            Some((responder))
16232        } else {
16233            None
16234        }
16235    }
16236
16237    #[allow(irrefutable_let_patterns)]
16238    pub fn into_get_dynamic_secure_heaps(
16239        self,
16240    ) -> Option<(SecureMemGetDynamicSecureHeapsResponder)> {
16241        if let SecureMemRequest::GetDynamicSecureHeaps { responder } = self {
16242            Some((responder))
16243        } else {
16244            None
16245        }
16246    }
16247
16248    #[allow(irrefutable_let_patterns)]
16249    pub fn into_get_physical_secure_heap_properties(
16250        self,
16251    ) -> Option<(
16252        SecureMemGetPhysicalSecureHeapPropertiesRequest,
16253        SecureMemGetPhysicalSecureHeapPropertiesResponder,
16254    )> {
16255        if let SecureMemRequest::GetPhysicalSecureHeapProperties { payload, responder } = self {
16256            Some((payload, responder))
16257        } else {
16258            None
16259        }
16260    }
16261
16262    #[allow(irrefutable_let_patterns)]
16263    pub fn into_add_secure_heap_physical_range(
16264        self,
16265    ) -> Option<(
16266        SecureMemAddSecureHeapPhysicalRangeRequest,
16267        SecureMemAddSecureHeapPhysicalRangeResponder,
16268    )> {
16269        if let SecureMemRequest::AddSecureHeapPhysicalRange { payload, responder } = self {
16270            Some((payload, responder))
16271        } else {
16272            None
16273        }
16274    }
16275
16276    #[allow(irrefutable_let_patterns)]
16277    pub fn into_delete_secure_heap_physical_range(
16278        self,
16279    ) -> Option<(
16280        SecureMemDeleteSecureHeapPhysicalRangeRequest,
16281        SecureMemDeleteSecureHeapPhysicalRangeResponder,
16282    )> {
16283        if let SecureMemRequest::DeleteSecureHeapPhysicalRange { payload, responder } = self {
16284            Some((payload, responder))
16285        } else {
16286            None
16287        }
16288    }
16289
16290    #[allow(irrefutable_let_patterns)]
16291    pub fn into_modify_secure_heap_physical_range(
16292        self,
16293    ) -> Option<(
16294        SecureMemModifySecureHeapPhysicalRangeRequest,
16295        SecureMemModifySecureHeapPhysicalRangeResponder,
16296    )> {
16297        if let SecureMemRequest::ModifySecureHeapPhysicalRange { payload, responder } = self {
16298            Some((payload, responder))
16299        } else {
16300            None
16301        }
16302    }
16303
16304    #[allow(irrefutable_let_patterns)]
16305    pub fn into_zero_sub_range(
16306        self,
16307    ) -> Option<(SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResponder)> {
16308        if let SecureMemRequest::ZeroSubRange { payload, responder } = self {
16309            Some((payload, responder))
16310        } else {
16311            None
16312        }
16313    }
16314
16315    /// Name of the method defined in FIDL
16316    pub fn method_name(&self) -> &'static str {
16317        match *self {
16318            SecureMemRequest::GetPhysicalSecureHeaps { .. } => "get_physical_secure_heaps",
16319            SecureMemRequest::GetDynamicSecureHeaps { .. } => "get_dynamic_secure_heaps",
16320            SecureMemRequest::GetPhysicalSecureHeapProperties { .. } => {
16321                "get_physical_secure_heap_properties"
16322            }
16323            SecureMemRequest::AddSecureHeapPhysicalRange { .. } => "add_secure_heap_physical_range",
16324            SecureMemRequest::DeleteSecureHeapPhysicalRange { .. } => {
16325                "delete_secure_heap_physical_range"
16326            }
16327            SecureMemRequest::ModifySecureHeapPhysicalRange { .. } => {
16328                "modify_secure_heap_physical_range"
16329            }
16330            SecureMemRequest::ZeroSubRange { .. } => "zero_sub_range",
16331            SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
16332                "unknown one-way method"
16333            }
16334            SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
16335                "unknown two-way method"
16336            }
16337        }
16338    }
16339}
16340
/// Server-side control handle for a `SecureMem` connection: shared access to
/// the serving machinery for channel-level operations (shutdown, epitaphs,
/// peer signaling) independent of any individual request.
#[derive(Debug, Clone)]
pub struct SecureMemControlHandle {
    // Shared with every responder minted for this connection (cloneable).
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
16345
impl fidl::endpoints::ControlHandle for SecureMemControlHandle {
    /// Delegates to `ServeInner::shutdown`.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    /// Delegates to `ServeInner::shutdown_with_epitaph`.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    /// Clears/sets signal bits on the peer end of the channel.
    /// Only available on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
16371
16372impl SecureMemControlHandle {}
16373
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetPhysicalSecureHeapsResponder {
    // ManuallyDrop lets `drop_without_shutdown` release this without running
    // `Drop`, which would otherwise shut down the channel.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id the response message must carry.
    tx_id: u32,
}
16380
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetPhysicalSecureHeapsResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16391
impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapsResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without the drop-time
    /// channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16406
16407impl SecureMemGetPhysicalSecureHeapsResponder {
16408    /// Sends a response to the FIDL transaction.
16409    ///
16410    /// Sets the channel to shutdown if an error occurs.
16411    pub fn send(
16412        self,
16413        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
16414    ) -> Result<(), fidl::Error> {
16415        let _result = self.send_raw(result);
16416        if _result.is_err() {
16417            self.control_handle.shutdown();
16418        }
16419        self.drop_without_shutdown();
16420        _result
16421    }
16422
16423    /// Similar to "send" but does not shutdown the channel if an error occurs.
16424    pub fn send_no_shutdown_on_err(
16425        self,
16426        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
16427    ) -> Result<(), fidl::Error> {
16428        let _result = self.send_raw(result);
16429        self.drop_without_shutdown();
16430        _result
16431    }
16432
16433    fn send_raw(
16434        &self,
16435        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
16436    ) -> Result<(), fidl::Error> {
16437        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16438            SecureMemGetPhysicalSecureHeapsResponse,
16439            Error,
16440        >>(
16441            fidl::encoding::FlexibleResult::new(result),
16442            self.tx_id,
16443            0x38716300592073e3,
16444            fidl::encoding::DynamicFlags::FLEXIBLE,
16445        )
16446    }
16447}
16448
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetDynamicSecureHeapsResponder {
    // ManuallyDrop lets `drop_without_shutdown` release this without running
    // `Drop`, which would otherwise shut down the channel.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id the response message must carry.
    tx_id: u32,
}
16455
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetDynamicSecureHeapsResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16466
impl fidl::endpoints::Responder for SecureMemGetDynamicSecureHeapsResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without the drop-time
    /// channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16481
16482impl SecureMemGetDynamicSecureHeapsResponder {
16483    /// Sends a response to the FIDL transaction.
16484    ///
16485    /// Sets the channel to shutdown if an error occurs.
16486    pub fn send(
16487        self,
16488        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
16489    ) -> Result<(), fidl::Error> {
16490        let _result = self.send_raw(result);
16491        if _result.is_err() {
16492            self.control_handle.shutdown();
16493        }
16494        self.drop_without_shutdown();
16495        _result
16496    }
16497
16498    /// Similar to "send" but does not shutdown the channel if an error occurs.
16499    pub fn send_no_shutdown_on_err(
16500        self,
16501        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
16502    ) -> Result<(), fidl::Error> {
16503        let _result = self.send_raw(result);
16504        self.drop_without_shutdown();
16505        _result
16506    }
16507
16508    fn send_raw(
16509        &self,
16510        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
16511    ) -> Result<(), fidl::Error> {
16512        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16513            SecureMemGetDynamicSecureHeapsResponse,
16514            Error,
16515        >>(
16516            fidl::encoding::FlexibleResult::new(result),
16517            self.tx_id,
16518            0x1190847f99952834,
16519            fidl::encoding::DynamicFlags::FLEXIBLE,
16520        )
16521    }
16522}
16523
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetPhysicalSecureHeapPropertiesResponder {
    // ManuallyDrop lets `drop_without_shutdown` release this without running
    // `Drop`, which would otherwise shut down the channel.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id the response message must carry.
    tx_id: u32,
}
16530
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetPhysicalSecureHeapPropertiesResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16541
impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapPropertiesResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without the drop-time
    /// channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16556
16557impl SecureMemGetPhysicalSecureHeapPropertiesResponder {
16558    /// Sends a response to the FIDL transaction.
16559    ///
16560    /// Sets the channel to shutdown if an error occurs.
16561    pub fn send(
16562        self,
16563        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
16564    ) -> Result<(), fidl::Error> {
16565        let _result = self.send_raw(result);
16566        if _result.is_err() {
16567            self.control_handle.shutdown();
16568        }
16569        self.drop_without_shutdown();
16570        _result
16571    }
16572
16573    /// Similar to "send" but does not shutdown the channel if an error occurs.
16574    pub fn send_no_shutdown_on_err(
16575        self,
16576        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
16577    ) -> Result<(), fidl::Error> {
16578        let _result = self.send_raw(result);
16579        self.drop_without_shutdown();
16580        _result
16581    }
16582
16583    fn send_raw(
16584        &self,
16585        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
16586    ) -> Result<(), fidl::Error> {
16587        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16588            SecureMemGetPhysicalSecureHeapPropertiesResponse,
16589            Error,
16590        >>(
16591            fidl::encoding::FlexibleResult::new(result),
16592            self.tx_id,
16593            0xc6f06889009c7bc,
16594            fidl::encoding::DynamicFlags::FLEXIBLE,
16595        )
16596    }
16597}
16598
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemAddSecureHeapPhysicalRangeResponder {
    // ManuallyDrop lets `drop_without_shutdown` release this without running
    // `Drop`, which would otherwise shut down the channel.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id the response message must carry.
    tx_id: u32,
}
16605
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemAddSecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16616
impl fidl::endpoints::Responder for SecureMemAddSecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without the drop-time
    /// channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16631
16632impl SecureMemAddSecureHeapPhysicalRangeResponder {
16633    /// Sends a response to the FIDL transaction.
16634    ///
16635    /// Sets the channel to shutdown if an error occurs.
16636    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16637        let _result = self.send_raw(result);
16638        if _result.is_err() {
16639            self.control_handle.shutdown();
16640        }
16641        self.drop_without_shutdown();
16642        _result
16643    }
16644
16645    /// Similar to "send" but does not shutdown the channel if an error occurs.
16646    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16647        let _result = self.send_raw(result);
16648        self.drop_without_shutdown();
16649        _result
16650    }
16651
16652    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16653        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16654            fidl::encoding::EmptyStruct,
16655            Error,
16656        >>(
16657            fidl::encoding::FlexibleResult::new(result),
16658            self.tx_id,
16659            0x35f695b9b6c7217a,
16660            fidl::encoding::DynamicFlags::FLEXIBLE,
16661        )
16662    }
16663}
16664
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemDeleteSecureHeapPhysicalRangeResponder {
    // ManuallyDrop lets `drop_without_shutdown` release this without running
    // `Drop`, which would otherwise shut down the channel.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id the response message must carry.
    tx_id: u32,
}
16671
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemDeleteSecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16682
impl fidl::endpoints::Responder for SecureMemDeleteSecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without the drop-time
    /// channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16697
16698impl SecureMemDeleteSecureHeapPhysicalRangeResponder {
16699    /// Sends a response to the FIDL transaction.
16700    ///
16701    /// Sets the channel to shutdown if an error occurs.
16702    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16703        let _result = self.send_raw(result);
16704        if _result.is_err() {
16705            self.control_handle.shutdown();
16706        }
16707        self.drop_without_shutdown();
16708        _result
16709    }
16710
16711    /// Similar to "send" but does not shutdown the channel if an error occurs.
16712    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16713        let _result = self.send_raw(result);
16714        self.drop_without_shutdown();
16715        _result
16716    }
16717
16718    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16719        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16720            fidl::encoding::EmptyStruct,
16721            Error,
16722        >>(
16723            fidl::encoding::FlexibleResult::new(result),
16724            self.tx_id,
16725            0xeaa58c650264c9e,
16726            fidl::encoding::DynamicFlags::FLEXIBLE,
16727        )
16728    }
16729}
16730
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemModifySecureHeapPhysicalRangeResponder {
    // ManuallyDrop lets `drop_without_shutdown` release this without running
    // `Drop`, which would otherwise shut down the channel.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id the response message must carry.
    tx_id: u32,
}
16737
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemModifySecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16748
impl fidl::endpoints::Responder for SecureMemModifySecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without the drop-time
    /// channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16763
16764impl SecureMemModifySecureHeapPhysicalRangeResponder {
16765    /// Sends a response to the FIDL transaction.
16766    ///
16767    /// Sets the channel to shutdown if an error occurs.
16768    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16769        let _result = self.send_raw(result);
16770        if _result.is_err() {
16771            self.control_handle.shutdown();
16772        }
16773        self.drop_without_shutdown();
16774        _result
16775    }
16776
16777    /// Similar to "send" but does not shutdown the channel if an error occurs.
16778    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16779        let _result = self.send_raw(result);
16780        self.drop_without_shutdown();
16781        _result
16782    }
16783
16784    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16785        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16786            fidl::encoding::EmptyStruct,
16787            Error,
16788        >>(
16789            fidl::encoding::FlexibleResult::new(result),
16790            self.tx_id,
16791            0x60b7448aa1187734,
16792            fidl::encoding::DynamicFlags::FLEXIBLE,
16793        )
16794    }
16795}
16796
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemZeroSubRangeResponder {
    // ManuallyDrop lets `drop_without_shutdown` release this without running
    // `Drop`, which would otherwise shut down the channel.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id the response message must carry.
    tx_id: u32,
}
16803
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemZeroSubRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16814
impl fidl::endpoints::Responder for SecureMemZeroSubRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and without the drop-time
    /// channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16829
16830impl SecureMemZeroSubRangeResponder {
16831    /// Sends a response to the FIDL transaction.
16832    ///
16833    /// Sets the channel to shutdown if an error occurs.
16834    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16835        let _result = self.send_raw(result);
16836        if _result.is_err() {
16837            self.control_handle.shutdown();
16838        }
16839        self.drop_without_shutdown();
16840        _result
16841    }
16842
16843    /// Similar to "send" but does not shutdown the channel if an error occurs.
16844    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16845        let _result = self.send_raw(result);
16846        self.drop_without_shutdown();
16847        _result
16848    }
16849
16850    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
16851        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
16852            fidl::encoding::EmptyStruct,
16853            Error,
16854        >>(
16855            fidl::encoding::FlexibleResult::new(result),
16856            self.tx_id,
16857            0x5b25b7901a385ce5,
16858            fidl::encoding::DynamicFlags::FLEXIBLE,
16859        )
16860    }
16861}
16862
16863mod internal {
16864    use super::*;
16865
16866    impl AllocatorAllocateNonSharedCollectionRequest {
16867        #[inline(always)]
16868        fn max_ordinal_present(&self) -> u64 {
16869            if let Some(_) = self.collection_request {
16870                return 1;
16871            }
16872            0
16873        }
16874    }
16875
    impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateNonSharedCollectionRequest {
        // Mutable borrow: encoding a resource type takes handles out of the
        // value (see the `take_or_borrow` use in the `Encode` impl below).
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
16884
    unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateNonSharedCollectionRequest {
        type Owned = Self;

        // FIDL tables are 8-byte aligned.
        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        // Inline form is a 16-byte vector header: element count plus presence
        // marker (see the `Encode` impl, which writes exactly those two u64s).
        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
16898
    unsafe impl
        fidl::encoding::Encode<
            AllocatorAllocateNonSharedCollectionRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorAllocateNonSharedCollectionRequest
    {
        // Encodes the table as a vector header followed by one 8-byte envelope
        // per ordinal up to the highest ordinal present; trailing unset fields
        // are omitted entirely.
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorAllocateNonSharedCollectionRequest>(offset);
            // Vector header: envelope count, then the presence marker.
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            // Ordinal 1: collection_request. Skip if beyond the highest ordinal set.
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
16954
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorAllocateNonSharedCollectionRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        // Decodes the table: reads the vector header, then walks the envelope
        // list, decoding known ordinals into fields and skipping unknown ones
        // (forward compatibility for fields added by newer peers).
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are encoded as non-nullable vectors of envelopes.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // Ordinal 1: collection_request.
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot byte/handle positions so the envelope's declared sizes
            // can be validated after decoding the member.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Values of <= 4 bytes are stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.collection_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // The member must have consumed exactly the bytes and handles
                // that the envelope header declared.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
17060
17061    impl AllocatorAllocateSharedCollectionRequest {
17062        #[inline(always)]
17063        fn max_ordinal_present(&self) -> u64 {
17064            if let Some(_) = self.token_request {
17065                return 1;
17066            }
17067            0
17068        }
17069    }
17070
17071    impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateSharedCollectionRequest {
17072        type Borrowed<'a> = &'a mut Self;
17073        fn take_or_borrow<'a>(
17074            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17075        ) -> Self::Borrowed<'a> {
17076            value
17077        }
17078    }
17079
17080    unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateSharedCollectionRequest {
17081        type Owned = Self;
17082
17083        #[inline(always)]
17084        fn inline_align(_context: fidl::encoding::Context) -> usize {
17085            8
17086        }
17087
17088        #[inline(always)]
17089        fn inline_size(_context: fidl::encoding::Context) -> usize {
17090            16
17091        }
17092    }
17093
    unsafe impl
        fidl::encoding::Encode<
            AllocatorAllocateSharedCollectionRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorAllocateSharedCollectionRequest
    {
        // Encodes the table as an envelope vector: a 16-byte inline header
        // (max present ordinal + ALLOC_PRESENT marker) followed by one
        // 8-byte envelope per ordinal, written out of line.
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorAllocateSharedCollectionRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Envelope 1: token_request (server end handle, borrowed mutably
            // so the encoder can take ownership of it).
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
17158
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorAllocateSharedCollectionRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        // Decodes the envelope-vector representation of this table: reads
        // the vector header, then walks envelopes in ordinal order, decoding
        // the known field and draining/validating unknown envelopes so that
        // tables from newer peers still decode.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Envelope 1: token_request. The out-of-line/handle counts are
            // snapshotted so the envelope's declared sizes can be verified
            // after the field is decoded.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.token_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
17266
17267    impl AllocatorBindSharedCollectionRequest {
17268        #[inline(always)]
17269        fn max_ordinal_present(&self) -> u64 {
17270            if let Some(_) = self.buffer_collection_request {
17271                return 2;
17272            }
17273            if let Some(_) = self.token {
17274                return 1;
17275            }
17276            0
17277        }
17278    }
17279
17280    impl fidl::encoding::ResourceTypeMarker for AllocatorBindSharedCollectionRequest {
17281        type Borrowed<'a> = &'a mut Self;
17282        fn take_or_borrow<'a>(
17283            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17284        ) -> Self::Borrowed<'a> {
17285            value
17286        }
17287    }
17288
17289    unsafe impl fidl::encoding::TypeMarker for AllocatorBindSharedCollectionRequest {
17290        type Owned = Self;
17291
17292        #[inline(always)]
17293        fn inline_align(_context: fidl::encoding::Context) -> usize {
17294            8
17295        }
17296
17297        #[inline(always)]
17298        fn inline_size(_context: fidl::encoding::Context) -> usize {
17299            16
17300        }
17301    }
17302
    unsafe impl
        fidl::encoding::Encode<
            AllocatorBindSharedCollectionRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorBindSharedCollectionRequest
    {
        // Encodes the table as an envelope vector: a 16-byte inline header
        // (max present ordinal + ALLOC_PRESENT marker) followed by one
        // 8-byte envelope per ordinal, written out of line. Encoding stops
        // after the highest present ordinal.
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorBindSharedCollectionRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Envelope 1: token (client end handle).
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Envelope 2: buffer_collection_request (server end handle).
            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.buffer_collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
17388
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorBindSharedCollectionRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        // Decodes the envelope-vector representation of this table: reads
        // the vector header, then walks envelopes in ordinal order (token is
        // ordinal 1, buffer_collection_request is ordinal 2), draining and
        // validating unknown envelopes so tables from newer peers decode.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Envelope 1: token. Byte/handle counts are snapshotted so the
            // envelope's declared sizes can be verified after decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.token.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Envelope 2: buffer_collection_request.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffer_collection_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
17556
17557    impl AllocatorGetVmoInfoRequest {
17558        #[inline(always)]
17559        fn max_ordinal_present(&self) -> u64 {
17560            if let Some(_) = self.vmo {
17561                return 1;
17562            }
17563            0
17564        }
17565    }
17566
17567    impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoRequest {
17568        type Borrowed<'a> = &'a mut Self;
17569        fn take_or_borrow<'a>(
17570            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17571        ) -> Self::Borrowed<'a> {
17572            value
17573        }
17574    }
17575
17576    unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoRequest {
17577        type Owned = Self;
17578
17579        #[inline(always)]
17580        fn inline_align(_context: fidl::encoding::Context) -> usize {
17581            8
17582        }
17583
17584        #[inline(always)]
17585        fn inline_size(_context: fidl::encoding::Context) -> usize {
17586            16
17587        }
17588    }
17589
    unsafe impl
        fidl::encoding::Encode<
            AllocatorGetVmoInfoRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorGetVmoInfoRequest
    {
        // Encodes the table as an envelope vector: a 16-byte inline header
        // (max present ordinal + ALLOC_PRESENT marker) followed by one
        // 8-byte envelope per ordinal, written out of line.
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorGetVmoInfoRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Envelope 1: vmo. Rights mask 2147483648 is 0x8000_0000
            // (presumably ZX_RIGHT_SAME_RIGHTS — confirm against zircon
            // rights definitions).
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.vmo.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Vmo,
                        { fidl::ObjectType::VMO.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
17660
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorGetVmoInfoRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        // Decodes the envelope-vector representation of this table: reads
        // the vector header, then walks envelopes in ordinal order, decoding
        // the known `vmo` field (ordinal 1) and draining/validating unknown
        // envelopes so tables from newer peers decode.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Envelope 1: vmo. Byte/handle counts are snapshotted so the
            // envelope's declared sizes can be verified after decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
17755
17756    impl AllocatorGetVmoInfoResponse {
17757        #[inline(always)]
17758        fn max_ordinal_present(&self) -> u64 {
17759            if let Some(_) = self.close_weak_asap {
17760                return 3;
17761            }
17762            if let Some(_) = self.buffer_index {
17763                return 2;
17764            }
17765            if let Some(_) = self.buffer_collection_id {
17766                return 1;
17767            }
17768            0
17769        }
17770    }
17771
17772    impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoResponse {
17773        type Borrowed<'a> = &'a mut Self;
17774        fn take_or_borrow<'a>(
17775            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17776        ) -> Self::Borrowed<'a> {
17777            value
17778        }
17779    }
17780
17781    unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoResponse {
17782        type Owned = Self;
17783
17784        #[inline(always)]
17785        fn inline_align(_context: fidl::encoding::Context) -> usize {
17786            8
17787        }
17788
17789        #[inline(always)]
17790        fn inline_size(_context: fidl::encoding::Context) -> usize {
17791            16
17792        }
17793    }
17794
    // Encodes `AllocatorGetVmoInfoResponse` as a FIDL table: a vector header
    // (count = highest present ordinal) followed by one 8-byte envelope per
    // ordinal, written out-of-line. Fields above the highest present ordinal
    // are omitted entirely; absent fields below it get zeroed envelopes.
    unsafe impl
        fidl::encoding::Encode<
            AllocatorGetVmoInfoResponse,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorGetVmoInfoResponse
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorGetVmoInfoResponse>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: buffer_collection_id (u64), a value type encoded by
            // reference.
            fidl::encoding::encode_in_envelope_optional::<
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffer_collection_id
                    .as_ref()
                    .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 2: buffer_index (u64), a value type encoded by reference.
            fidl::encoding::encode_in_envelope_optional::<
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffer_index.as_ref().map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 3 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (3 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 3: close_weak_asap (eventpair handle). Handles are
            // resources, so `take_or_borrow` is used via `as_mut` instead of a
            // shared borrow — encoding consumes the handle.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.close_weak_asap.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::EventPair,
                        { fidl::ObjectType::EVENTPAIR.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
17919
    // Decodes `AllocatorGetVmoInfoResponse` from its table wire form: reads
    // the envelope-vector header, then walks envelopes in ordinal order,
    // decoding known ordinals (1..=3) and skipping unknown ones so newer
    // peers' extra fields are tolerated.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorGetVmoInfoResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // A table's vector header must be present (tables are not nullable
            // at this position).
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1: buffer_collection_id (u64). Snapshot out-of-line and
            // handle positions so the envelope's declared byte/handle counts
            // can be validated after decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                // Values of <= 4 bytes are stored inline in the envelope; the
                // header's inline bit must agree.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u64,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 2: buffer_index (u64); same validation as ordinal 1.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffer_index.get_or_insert_with(|| {
                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u64,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 3 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 3: close_weak_asap (eventpair handle); decoding claims
            // the handle from the decoder's handle table.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
18118
18119    impl BufferCollectionAttachLifetimeTrackingRequest {
18120        #[inline(always)]
18121        fn max_ordinal_present(&self) -> u64 {
18122            if let Some(_) = self.buffers_remaining {
18123                return 2;
18124            }
18125            if let Some(_) = self.server_end {
18126                return 1;
18127            }
18128            0
18129        }
18130    }
18131
18132    impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
18133        type Borrowed<'a> = &'a mut Self;
18134        fn take_or_borrow<'a>(
18135            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18136        ) -> Self::Borrowed<'a> {
18137            value
18138        }
18139    }
18140
18141    unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
18142        type Owned = Self;
18143
18144        #[inline(always)]
18145        fn inline_align(_context: fidl::encoding::Context) -> usize {
18146            8
18147        }
18148
18149        #[inline(always)]
18150        fn inline_size(_context: fidl::encoding::Context) -> usize {
18151            16
18152        }
18153    }
18154
    // Encodes `BufferCollectionAttachLifetimeTrackingRequest` as a FIDL
    // table: a vector header (count = highest present ordinal) followed by
    // one 8-byte envelope per ordinal. Fields above the highest present
    // ordinal are omitted; absent fields below it get zeroed envelopes.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionAttachLifetimeTrackingRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionAttachLifetimeTrackingRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionAttachLifetimeTrackingRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: server_end (eventpair handle). Handles are resources,
            // so `take_or_borrow` is used via `as_mut` — encoding consumes the
            // handle.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.server_end.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::EventPair,
                        { fidl::ObjectType::EVENTPAIR.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 2: buffers_remaining (u32), a value type encoded by
            // reference.
            fidl::encoding::encode_in_envelope_optional::<
                u32,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffers_remaining
                    .as_ref()
                    .map(<u32 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
18253
    // Decodes `BufferCollectionAttachLifetimeTrackingRequest` from its table
    // wire form: reads the envelope-vector header, then walks envelopes in
    // ordinal order, decoding known ordinals (1..=2) and skipping unknown
    // ones so newer peers' extra fields are tolerated.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionAttachLifetimeTrackingRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // A table's vector header must be present (tables are not nullable
            // at this position).
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1: server_end (eventpair handle). Snapshot out-of-line
            // and handle positions so the envelope's declared byte/handle
            // counts can be validated after decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Values of <= 4 bytes are stored inline in the envelope; the
                // header's inline bit must agree.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 2: buffers_remaining (u32); same validation as above.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u32 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffers_remaining.get_or_insert_with(|| {
                    fidl::new_empty!(u32, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u32,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
18400
18401    impl BufferCollectionAttachTokenRequest {
18402        #[inline(always)]
18403        fn max_ordinal_present(&self) -> u64 {
18404            if let Some(_) = self.token_request {
18405                return 2;
18406            }
18407            if let Some(_) = self.rights_attenuation_mask {
18408                return 1;
18409            }
18410            0
18411        }
18412    }
18413
18414    impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachTokenRequest {
18415        type Borrowed<'a> = &'a mut Self;
18416        fn take_or_borrow<'a>(
18417            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18418        ) -> Self::Borrowed<'a> {
18419            value
18420        }
18421    }
18422
18423    unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachTokenRequest {
18424        type Owned = Self;
18425
18426        #[inline(always)]
18427        fn inline_align(_context: fidl::encoding::Context) -> usize {
18428            8
18429        }
18430
18431        #[inline(always)]
18432        fn inline_size(_context: fidl::encoding::Context) -> usize {
18433            16
18434        }
18435    }
18436
    // Encodes BufferCollectionAttachTokenRequest in the FIDL table wire
    // format: a vector header (max ordinal + presence marker) inline, followed
    // by one 8-byte envelope per ordinal out of line. Encoding stops early at
    // the highest present ordinal.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionAttachTokenRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionAttachTokenRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionAttachTokenRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            // Ordinal 1: rights_attenuation_mask (value type, borrowed).
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.rights_attenuation_mask
                    .as_ref()
                    .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            // Ordinal 2: token_request (resource type; take_or_borrow lets the
            // encoder consume the server-end handle).
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
18529
    // Decodes BufferCollectionAttachTokenRequest from the FIDL table wire
    // format. Unknown ordinals (gaps and trailing envelopes) are skipped for
    // forward compatibility; each known envelope's byte/handle accounting is
    // validated after its payload is decoded.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionAttachTokenRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // A table's inline part is a vector header; a null table is invalid.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // Ordinal 1: rights_attenuation_mask.
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                // Payloads of 4 bytes or fewer are stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                    fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    fidl::Rights,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // The envelope header's declared byte/handle counts must match
                // what decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 2: token_request (server end of BufferCollectionToken).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.token_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
18689
18690    impl BufferCollectionInfo {
18691        #[inline(always)]
18692        fn max_ordinal_present(&self) -> u64 {
18693            if let Some(_) = self.buffer_collection_id {
18694                return 3;
18695            }
18696            if let Some(_) = self.buffers {
18697                return 2;
18698            }
18699            if let Some(_) = self.settings {
18700                return 1;
18701            }
18702            0
18703        }
18704    }
18705
18706    impl fidl::encoding::ResourceTypeMarker for BufferCollectionInfo {
18707        type Borrowed<'a> = &'a mut Self;
18708        fn take_or_borrow<'a>(
18709            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18710        ) -> Self::Borrowed<'a> {
18711            value
18712        }
18713    }
18714
    // Wire-format metadata: a table's inline part is its 16-byte vector
    // header, 8-byte aligned. These constants are part of the FIDL ABI.
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionInfo {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
18728
    // Encodes BufferCollectionInfo in the FIDL table wire format: vector
    // header inline, then one 8-byte envelope per ordinal out of line, stopping
    // at the highest present ordinal.
    unsafe impl
        fidl::encoding::Encode<BufferCollectionInfo, fidl::encoding::DefaultFuchsiaResourceDialect>
        for &mut BufferCollectionInfo
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionInfo>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            // Ordinal 1: settings (value type, borrowed).
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                SingleBufferSettings,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.settings
                    .as_ref()
                    .map(<SingleBufferSettings as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            // Ordinal 2: buffers (vector of VmoBuffer, max 128; resource type —
            // take_or_borrow lets the encoder consume the VMO handles).
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.buffers.as_mut().map(<fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

            _prev_end_offset = cur_offset + envelope_size;
            // Ordinal 3: buffer_collection_id (u64, borrowed).
            if 3 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (3 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffer_collection_id
                    .as_ref()
                    .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
18838
    // Decodes BufferCollectionInfo from the FIDL table wire format, skipping
    // unknown ordinals for forward compatibility and validating each known
    // envelope's declared byte/handle counts against what was consumed.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionInfo
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // A table's inline part is a vector header; a null table is invalid.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // Ordinal 1: settings.
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <SingleBufferSettings as fidl::encoding::TypeMarker>::inline_size(
                        decoder.context,
                    );
                // Payloads of 4 bytes or fewer are stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.settings.get_or_insert_with(|| {
                    fidl::new_empty!(
                        SingleBufferSettings,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    SingleBufferSettings,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 2: buffers (vector of VmoBuffer, max 128).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.buffers.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 3: buffer_collection_id.
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 3 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u64,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
19036
19037    impl BufferCollectionSetConstraintsRequest {
19038        #[inline(always)]
19039        fn max_ordinal_present(&self) -> u64 {
19040            if let Some(_) = self.constraints {
19041                return 1;
19042            }
19043            0
19044        }
19045    }
19046
19047    impl fidl::encoding::ResourceTypeMarker for BufferCollectionSetConstraintsRequest {
19048        type Borrowed<'a> = &'a mut Self;
19049        fn take_or_borrow<'a>(
19050            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
19051        ) -> Self::Borrowed<'a> {
19052            value
19053        }
19054    }
19055
    // Wire-format metadata: the table's inline part is its 16-byte vector
    // header, 8-byte aligned. These constants are part of the FIDL ABI.
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionSetConstraintsRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
19069
    // Encodes BufferCollectionSetConstraintsRequest in the FIDL table wire
    // format: a vector header inline, then (at most) one envelope for the
    // single `constraints` field out of line.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionSetConstraintsRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionSetConstraintsRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionSetConstraintsRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            // Ordinal 1: constraints (value type, borrowed).
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                BufferCollectionConstraints,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.constraints
                    .as_ref()
                    .map(<BufferCollectionConstraints as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
19132
    /// Decodes `BufferCollectionSetConstraintsRequest` (a FIDL table) from the
    /// wire: a vector header (max ordinal + presence marker) followed by one
    /// 8-byte envelope per ordinal. Unknown ordinals are skipped so newer
    /// peers remain compatible.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionSetConstraintsRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // A newly-decoded table starts from its `Default` (no fields set).
            Self::default()
        }

        /// Decodes the table in place at `offset`, filling known ordinal 1
        /// (`constraints`) and discarding any higher, unknown envelopes.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <BufferCollectionConstraints as fidl::encoding::TypeMarker>::inline_size(
                        decoder.context,
                    );
                // Values of 4 bytes or fewer are stored inline in the envelope;
                // the sender's inline bit must agree with the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.constraints.get_or_insert_with(|| {
                    fidl::new_empty!(
                        BufferCollectionConstraints,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    BufferCollectionConstraints,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope's declared byte/handle counts match what
                // the member decode actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
19235
19236    impl BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
19237        #[inline(always)]
19238        fn max_ordinal_present(&self) -> u64 {
19239            if let Some(_) = self.group_request {
19240                return 1;
19241            }
19242            0
19243        }
19244    }
19245
    /// Marks this table as a FIDL resource type: encoding borrows it mutably
    /// so handle-carrying fields can be moved out of the value.
    impl fidl::encoding::ResourceTypeMarker
        for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
    {
        type Borrowed<'a> = &'a mut Self;
        // Borrow mutably; the encoder takes handles out of the value later.
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
19256
    /// Wire-format metadata for this table: encoded inline as a 16-byte,
    /// 8-byte-aligned vector header (the envelopes live out of line).
    unsafe impl fidl::encoding::TypeMarker
        for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
    {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
19272
    /// Encodes the table: a vector header (max ordinal + presence marker)
    /// followed by one 8-byte envelope per ordinal up to the highest field
    /// present. The `group_request` server end is moved out of `self`.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder
                .debug_check_bounds::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
                    offset,
                );
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.group_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
19342
    /// Decodes the table from the wire: reads the vector header, then walks
    /// the envelope list, filling known ordinal 1 (`group_request`) and
    /// skipping unknown ordinals for forward compatibility.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // A newly-decoded table starts from its `Default` (no fields set).
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Values of 4 bytes or fewer are stored inline in the envelope;
                // the sender's inline bit must agree with the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.group_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope's declared byte/handle counts match what
                // the member decode actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
19450
19451    impl BufferCollectionTokenDuplicateRequest {
19452        #[inline(always)]
19453        fn max_ordinal_present(&self) -> u64 {
19454            if let Some(_) = self.token_request {
19455                return 2;
19456            }
19457            if let Some(_) = self.rights_attenuation_mask {
19458                return 1;
19459            }
19460            0
19461        }
19462    }
19463
    /// Marks this table as a FIDL resource type: encoding borrows it mutably
    /// so handle-carrying fields can be moved out of the value.
    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateRequest {
        type Borrowed<'a> = &'a mut Self;
        // Borrow mutably; the encoder takes handles out of the value later.
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
19472
    /// Wire-format metadata for this table: encoded inline as a 16-byte,
    /// 8-byte-aligned vector header (the envelopes live out of line).
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
19486
    /// Encodes the table: a vector header (max ordinal + presence marker)
    /// followed by one 8-byte envelope per ordinal up to the highest field
    /// present. `rights_attenuation_mask` (ordinal 1) is copied by reference;
    /// the `token_request` server end (ordinal 2) is moved out of `self`.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionTokenDuplicateRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionTokenDuplicateRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionTokenDuplicateRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.rights_attenuation_mask
                    .as_ref()
                    .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
19579
    /// Decodes the table from the wire: reads the vector header, then walks
    /// the envelope list in ordinal order, filling `rights_attenuation_mask`
    /// (ordinal 1) and `token_request` (ordinal 2) and skipping unknown
    /// ordinals for forward compatibility.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionTokenDuplicateRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // A newly-decoded table starts from its `Default` (no fields set).
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                // Values of 4 bytes or fewer are stored inline in the envelope;
                // the sender's inline bit must agree with the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                    fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    fidl::Rights,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope's declared byte/handle counts match what
                // the member decode actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.token_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
19739
19740    impl BufferCollectionTokenGroupCreateChildRequest {
19741        #[inline(always)]
19742        fn max_ordinal_present(&self) -> u64 {
19743            if let Some(_) = self.rights_attenuation_mask {
19744                return 2;
19745            }
19746            if let Some(_) = self.token_request {
19747                return 1;
19748            }
19749            0
19750        }
19751    }
19752
    /// Marks this table as a FIDL resource type: encoding borrows it mutably
    /// so handle-carrying fields can be moved out of the value.
    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildRequest {
        type Borrowed<'a> = &'a mut Self;
        // Borrow mutably; the encoder takes handles out of the value later.
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
19761
    /// Wire-format metadata for this table: encoded inline as a 16-byte,
    /// 8-byte-aligned vector header (the envelopes live out of line).
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
19775
    /// Encodes the table: a vector header (max ordinal + presence marker)
    /// followed by one 8-byte envelope per ordinal up to the highest field
    /// present. The `token_request` server end (ordinal 1) is moved out of
    /// `self`; `rights_attenuation_mask` (ordinal 2) is copied by reference.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionTokenGroupCreateChildRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionTokenGroupCreateChildRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionTokenGroupCreateChildRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.rights_attenuation_mask
                    .as_ref()
                    .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
19868
    // Decodes `BufferCollectionTokenGroupCreateChildRequest` from the FIDL table
    // wire format: a vector header (max present ordinal + presence marker)
    // followed by one 8-byte envelope per ordinal. Ordinal 1 is `token_request`,
    // ordinal 2 is `rights_attenuation_mask`; envelopes for unknown ordinals are
    // consumed via `decode_unknown_envelope`.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionTokenGroupCreateChildRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // Every table field is optional, so the empty value is the default.
            Self::default()
        }

        // SAFETY contract (from the `Decode` trait): caller must ensure `offset`
        // points at `inline_size` bytes of valid, aligned data in the decoder's
        // buffer; `debug_check_bounds` asserts this in debug builds.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // The table's inline part is a vector header; an absent body is
            // invalid because tables are never nullable.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1: `token_request` (server end of a BufferCollectionToken
            // channel). Snapshot out-of-line cursor and handle count so the
            // envelope header's declared byte/handle counts can be verified.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Values of <= 4 bytes are stored inline in the envelope itself;
                // the header's inline bit must agree with the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Decode in place into the field, creating an empty value first
                // if the field was not already set.
                let val_ref = self.token_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope header's byte/handle counts against what
                // decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 2: `rights_attenuation_mask` (plain value type, same
            // envelope verification scheme as above).
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                    fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    fidl::Rights,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
20028
20029    impl BufferCollectionTokenGroupCreateChildrenSyncResponse {
20030        #[inline(always)]
20031        fn max_ordinal_present(&self) -> u64 {
20032            if let Some(_) = self.tokens {
20033                return 1;
20034            }
20035            0
20036        }
20037    }
20038
20039    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
20040        type Borrowed<'a> = &'a mut Self;
20041        fn take_or_borrow<'a>(
20042            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20043        ) -> Self::Borrowed<'a> {
20044            value
20045        }
20046    }
20047
    // Wire-format layout constants computed by fidlgen. The impl is `unsafe`
    // because encode/decode rely on these values being exactly right; do not
    // edit them by hand.
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            // Tables are 8-byte aligned.
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            // Inline part is the 16-byte vector header (max ordinal at +0,
            // presence marker at +8) — see the Encode impl for this type.
            16
        }
    }
20061
    // Encodes `BufferCollectionTokenGroupCreateChildrenSyncResponse` as a FIDL
    // table: a 16-byte vector header (max present ordinal + ALLOC_PRESENT),
    // then one 8-byte envelope per ordinal up to the highest present field.
    // Encoding goes through `&mut` so `take_or_borrow` can transfer the channel
    // handles held in `tokens` during encoding.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionTokenGroupCreateChildrenSyncResponse,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionTokenGroupCreateChildrenSyncResponse
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder
                .debug_check_bounds::<BufferCollectionTokenGroupCreateChildrenSyncResponse>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            // Shadows the inline `offset`: from here on, offsets are relative to
            // the out-of-line envelope block.
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `tokens`, a bounded vector (max 64) of
            // BufferCollectionToken client ends.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.tokens.as_mut().map(
                    <fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
20135
    // Decodes `BufferCollectionTokenGroupCreateChildrenSyncResponse` from the
    // FIDL table wire format (vector header + one 8-byte envelope per ordinal).
    // Ordinal 1 is `tokens`; envelopes for unknown ordinals are consumed via
    // `decode_unknown_envelope`.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionTokenGroupCreateChildrenSyncResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // Every table field is optional, so the empty value is the default.
            Self::default()
        }

        // SAFETY contract (from the `Decode` trait): caller must ensure `offset`
        // points at valid, aligned inline data; checked in debug builds below.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are never nullable, so an absent body is an error.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1: `tokens`, a bounded vector (max 64) of
            // BufferCollectionToken client ends. Snapshot cursor/handle counts
            // to verify the envelope header afterwards.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Members of <= 4 bytes are stored inline in the envelope; the
                // header's inline bit must agree with the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.tokens.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Vector<
                            fidl::encoding::Endpoint<
                                fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                            >,
                            64,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify declared byte/handle counts against what decoding
                // actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
20252
20253    impl BufferCollectionTokenDuplicateSyncResponse {
20254        #[inline(always)]
20255        fn max_ordinal_present(&self) -> u64 {
20256            if let Some(_) = self.tokens {
20257                return 1;
20258            }
20259            0
20260        }
20261    }
20262
20263    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateSyncResponse {
20264        type Borrowed<'a> = &'a mut Self;
20265        fn take_or_borrow<'a>(
20266            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20267        ) -> Self::Borrowed<'a> {
20268            value
20269        }
20270    }
20271
    // Wire-format layout constants computed by fidlgen. The impl is `unsafe`
    // because encode/decode rely on these values being exactly right; do not
    // edit them by hand.
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateSyncResponse {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            // Tables are 8-byte aligned.
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            // Inline part is the 16-byte vector header (max ordinal at +0,
            // presence marker at +8) — see the Encode impl for this type.
            16
        }
    }
20285
    // Encodes `BufferCollectionTokenDuplicateSyncResponse` as a FIDL table:
    // a 16-byte vector header (max present ordinal + ALLOC_PRESENT), then one
    // 8-byte envelope per ordinal up to the highest present field. Encoding goes
    // through `&mut` so `take_or_borrow` can transfer the channel handles held
    // in `tokens` during encoding.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionTokenDuplicateSyncResponse,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionTokenDuplicateSyncResponse
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionTokenDuplicateSyncResponse>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            // Shadows the inline `offset`: from here on, offsets are relative to
            // the out-of-line envelope block.
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `tokens`, a bounded vector (max 64) of
            // BufferCollectionToken client ends.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.tokens.as_mut().map(
                    <fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
20358
    // Decodes `BufferCollectionTokenDuplicateSyncResponse` from the FIDL table
    // wire format (vector header + one 8-byte envelope per ordinal). Ordinal 1
    // is `tokens`; envelopes for unknown ordinals are consumed via
    // `decode_unknown_envelope`.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionTokenDuplicateSyncResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // Every table field is optional, so the empty value is the default.
            Self::default()
        }

        // SAFETY contract (from the `Decode` trait): caller must ensure `offset`
        // points at valid, aligned inline data; checked in debug builds below.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are never nullable, so an absent body is an error.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1: `tokens`, a bounded vector (max 64) of
            // BufferCollectionToken client ends. Snapshot cursor/handle counts
            // to verify the envelope header afterwards.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Members of <= 4 bytes are stored inline in the envelope; the
                // header's inline bit must agree with the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.tokens.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Vector<
                            fidl::encoding::Endpoint<
                                fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                            >,
                            64,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify declared byte/handle counts against what decoding
                // actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
20475
20476    impl BufferCollectionWaitForAllBuffersAllocatedResponse {
20477        #[inline(always)]
20478        fn max_ordinal_present(&self) -> u64 {
20479            if let Some(_) = self.buffer_collection_info {
20480                return 1;
20481            }
20482            0
20483        }
20484    }
20485
20486    impl fidl::encoding::ResourceTypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
20487        type Borrowed<'a> = &'a mut Self;
20488        fn take_or_borrow<'a>(
20489            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20490        ) -> Self::Borrowed<'a> {
20491            value
20492        }
20493    }
20494
    // Wire-format layout constants computed by fidlgen. The impl is `unsafe`
    // because encode/decode rely on these values being exactly right; do not
    // edit them by hand.
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            // Tables are 8-byte aligned.
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            // Inline part is the 16-byte vector header (max ordinal at +0,
            // presence marker at +8) — see the Encode impl for this type.
            16
        }
    }
20508
    // Encodes `BufferCollectionWaitForAllBuffersAllocatedResponse` as a FIDL
    // table: a 16-byte vector header (max present ordinal + ALLOC_PRESENT),
    // then one 8-byte envelope per ordinal up to the highest present field.
    // Encoding goes through `&mut` so `take_or_borrow` can transfer the
    // resources held in `buffer_collection_info` during encoding.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionWaitForAllBuffersAllocatedResponse,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionWaitForAllBuffersAllocatedResponse
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder
                .debug_check_bounds::<BufferCollectionWaitForAllBuffersAllocatedResponse>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            // Shadows the inline `offset`: from here on, offsets are relative to
            // the out-of-line envelope block.
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `buffer_collection_info` (resource table).
            fidl::encoding::encode_in_envelope_optional::<
                BufferCollectionInfo,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffer_collection_info.as_mut().map(
                    <BufferCollectionInfo as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
20572
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionWaitForAllBuffersAllocatedResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // Every table field is optional, so the empty value is just the
            // derived default (all fields `None`).
            Self::default()
        }

        // Decodes this table from the FIDL wire format: a vector header (count
        // = highest ordinal present, plus presence marker) followed out of line
        // by one 8-byte envelope per ordinal. Known ordinal 1 is
        // `buffer_collection_info`; any other envelope is skipped as unknown.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are not nullable, so an absent vector is an error.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot the out-of-line cursor and handle count so the
            // envelope's declared sizes can be validated after decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <BufferCollectionInfo as fidl::encoding::TypeMarker>::inline_size(
                        decoder.context,
                    );
                // Members of at most 4 bytes are stored inline in the envelope;
                // the header's inline bit must agree with the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Decode in place into the (created-on-demand) field value.
                let val_ref = self.buffer_collection_info.get_or_insert_with(|| {
                    fidl::new_empty!(
                        BufferCollectionInfo,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    BufferCollectionInfo,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // The envelope header's byte/handle counts must match what the
                // member decode actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
20675
20676    impl NodeAttachNodeTrackingRequest {
20677        #[inline(always)]
20678        fn max_ordinal_present(&self) -> u64 {
20679            if let Some(_) = self.server_end {
20680                return 1;
20681            }
20682            0
20683        }
20684    }
20685
20686    impl fidl::encoding::ResourceTypeMarker for NodeAttachNodeTrackingRequest {
20687        type Borrowed<'a> = &'a mut Self;
20688        fn take_or_borrow<'a>(
20689            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20690        ) -> Self::Borrowed<'a> {
20691            value
20692        }
20693    }
20694
20695    unsafe impl fidl::encoding::TypeMarker for NodeAttachNodeTrackingRequest {
20696        type Owned = Self;
20697
20698        #[inline(always)]
20699        fn inline_align(_context: fidl::encoding::Context) -> usize {
20700            8
20701        }
20702
20703        #[inline(always)]
20704        fn inline_size(_context: fidl::encoding::Context) -> usize {
20705            16
20706        }
20707    }
20708
    unsafe impl
        fidl::encoding::Encode<
            NodeAttachNodeTrackingRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut NodeAttachNodeTrackingRequest
    {
        // Encodes the table: writes the vector header (count = highest present
        // ordinal, plus presence marker), then one 8-byte envelope per ordinal
        // out of line. Ordinal 1 carries `server_end` (an eventpair handle).
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<NodeAttachNodeTrackingRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // NOTE(review): 2147483648 == 0x8000_0000; presumably the
            // SAME_RIGHTS rights constant — confirm against zx rights docs.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.server_end.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::EventPair,
                        { fidl::ObjectType::EVENTPAIR.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
20779
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for NodeAttachNodeTrackingRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // All table fields are optional; the empty value is the default.
            Self::default()
        }

        // Decodes this table from the FIDL wire format: a vector header
        // followed out of line by one 8-byte envelope per ordinal. Known
        // ordinal 1 is `server_end` (an eventpair handle); other envelopes
        // are skipped as unknown.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are not nullable, so an absent vector is an error.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot the out-of-line cursor and handle count so the
            // envelope's declared sizes can be validated after decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Members of at most 4 bytes are stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Decode in place into the (created-on-demand) field value.
                let val_ref =
                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // The envelope header's byte/handle counts must match what the
                // member decode actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
20874
20875    impl NodeIsAlternateForRequest {
20876        #[inline(always)]
20877        fn max_ordinal_present(&self) -> u64 {
20878            if let Some(_) = self.node_ref {
20879                return 1;
20880            }
20881            0
20882        }
20883    }
20884
20885    impl fidl::encoding::ResourceTypeMarker for NodeIsAlternateForRequest {
20886        type Borrowed<'a> = &'a mut Self;
20887        fn take_or_borrow<'a>(
20888            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20889        ) -> Self::Borrowed<'a> {
20890            value
20891        }
20892    }
20893
20894    unsafe impl fidl::encoding::TypeMarker for NodeIsAlternateForRequest {
20895        type Owned = Self;
20896
20897        #[inline(always)]
20898        fn inline_align(_context: fidl::encoding::Context) -> usize {
20899            8
20900        }
20901
20902        #[inline(always)]
20903        fn inline_size(_context: fidl::encoding::Context) -> usize {
20904            16
20905        }
20906    }
20907
    unsafe impl
        fidl::encoding::Encode<
            NodeIsAlternateForRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut NodeIsAlternateForRequest
    {
        // Encodes the table: writes the vector header (count = highest present
        // ordinal, plus presence marker), then one 8-byte envelope per ordinal
        // out of line. Ordinal 1 carries `node_ref` (an event handle).
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<NodeIsAlternateForRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // NOTE(review): 2147483648 == 0x8000_0000; presumably the
            // SAME_RIGHTS rights constant — confirm against zx rights docs.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.node_ref.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Event,
                        { fidl::ObjectType::EVENT.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
20978
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for NodeIsAlternateForRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // All table fields are optional; the empty value is the default.
            Self::default()
        }

        // Decodes this table from the FIDL wire format: a vector header
        // followed out of line by one 8-byte envelope per ordinal. Known
        // ordinal 1 is `node_ref` (an event handle); other envelopes are
        // skipped as unknown.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are not nullable, so an absent vector is an error.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot the out-of-line cursor and handle count so the
            // envelope's declared sizes can be validated after decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Members of at most 4 bytes are stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Decode in place into the (created-on-demand) field value.
                let val_ref =
                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // The envelope header's byte/handle counts must match what the
                // member decode actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
21073
21074    impl NodeSetWeakOkRequest {
21075        #[inline(always)]
21076        fn max_ordinal_present(&self) -> u64 {
21077            if let Some(_) = self.for_child_nodes_also {
21078                return 1;
21079            }
21080            0
21081        }
21082    }
21083
21084    impl fidl::encoding::ResourceTypeMarker for NodeSetWeakOkRequest {
21085        type Borrowed<'a> = &'a mut Self;
21086        fn take_or_borrow<'a>(
21087            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
21088        ) -> Self::Borrowed<'a> {
21089            value
21090        }
21091    }
21092
21093    unsafe impl fidl::encoding::TypeMarker for NodeSetWeakOkRequest {
21094        type Owned = Self;
21095
21096        #[inline(always)]
21097        fn inline_align(_context: fidl::encoding::Context) -> usize {
21098            8
21099        }
21100
21101        #[inline(always)]
21102        fn inline_size(_context: fidl::encoding::Context) -> usize {
21103            16
21104        }
21105    }
21106
    unsafe impl
        fidl::encoding::Encode<NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect>
        for &mut NodeSetWeakOkRequest
    {
        // Encodes the table: writes the vector header (count = highest present
        // ordinal, plus presence marker), then one 8-byte envelope per ordinal
        // out of line. Ordinal 1 carries `for_child_nodes_also` (a bool, which
        // is a value type, hence `ValueTypeMarker::borrow` below).
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<NodeSetWeakOkRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                bool,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.for_child_nodes_also
                    .as_ref()
                    .map(<bool as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
21167
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for NodeSetWeakOkRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // All table fields are optional; the empty value is the default.
            Self::default()
        }

        // Decodes this table from the FIDL wire format: a vector header
        // followed out of line by one 8-byte envelope per ordinal. Known
        // ordinal 1 is `for_child_nodes_also` (a bool); other envelopes are
        // skipped as unknown.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are not nullable, so an absent vector is an error.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot the out-of-line cursor and handle count so the
            // envelope's declared sizes can be validated after decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <bool as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                // Members of at most 4 bytes (like bool) are stored inline.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Decode in place into the (created-on-demand) field value.
                let val_ref = self.for_child_nodes_also.get_or_insert_with(|| {
                    fidl::new_empty!(bool, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    bool,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // The envelope header's byte/handle counts must match what the
                // member decode actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
21265
21266    impl NodeGetNodeRefResponse {
21267        #[inline(always)]
21268        fn max_ordinal_present(&self) -> u64 {
21269            if let Some(_) = self.node_ref {
21270                return 1;
21271            }
21272            0
21273        }
21274    }
21275
21276    impl fidl::encoding::ResourceTypeMarker for NodeGetNodeRefResponse {
21277        type Borrowed<'a> = &'a mut Self;
21278        fn take_or_borrow<'a>(
21279            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
21280        ) -> Self::Borrowed<'a> {
21281            value
21282        }
21283    }
21284
21285    unsafe impl fidl::encoding::TypeMarker for NodeGetNodeRefResponse {
21286        type Owned = Self;
21287
21288        #[inline(always)]
21289        fn inline_align(_context: fidl::encoding::Context) -> usize {
21290            8
21291        }
21292
21293        #[inline(always)]
21294        fn inline_size(_context: fidl::encoding::Context) -> usize {
21295            16
21296        }
21297    }
21298
    unsafe impl
        fidl::encoding::Encode<
            NodeGetNodeRefResponse,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut NodeGetNodeRefResponse
    {
        // Encodes the table: writes the vector header (count = highest present
        // ordinal, plus presence marker), then one 8-byte envelope per ordinal
        // out of line. Ordinal 1 carries `node_ref` (an event handle).
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<NodeGetNodeRefResponse>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // NOTE(review): 2147483648 == 0x8000_0000; presumably the
            // SAME_RIGHTS rights constant — confirm against zx rights docs.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.node_ref.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Event,
                        { fidl::ObjectType::EVENT.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
21369
    // Decodes a `NodeGetNodeRefResponse` table from the FIDL wire format.
    // A table is transmitted as a vector of envelopes, one per ordinal; the
    // decoder walks that list, filling known fields and skipping unknown
    // ordinals for forward compatibility.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for NodeGetNodeRefResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // Start with every field absent; `decode` fills in what is on the wire.
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the envelope count (the highest ordinal present). A null
            // vector header is invalid: the table itself is not nullable.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // Ordinal 1: node_ref (zx event handle).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot out-of-line and handle positions so the envelope's declared
            // byte/handle counts can be validated after the member is decoded.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Payloads of at most 4 bytes must be stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Decode in place into the field, creating an empty handle slot first.
                let val_ref =
                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // The envelope header's byte/handle counts must match what was consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
21464
21465    impl VmoBuffer {
21466        #[inline(always)]
21467        fn max_ordinal_present(&self) -> u64 {
21468            if let Some(_) = self.close_weak_asap {
21469                return 3;
21470            }
21471            if let Some(_) = self.vmo_usable_start {
21472                return 2;
21473            }
21474            if let Some(_) = self.vmo {
21475                return 1;
21476            }
21477            0
21478        }
21479    }
21480
21481    impl fidl::encoding::ResourceTypeMarker for VmoBuffer {
21482        type Borrowed<'a> = &'a mut Self;
21483        fn take_or_borrow<'a>(
21484            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
21485        ) -> Self::Borrowed<'a> {
21486            value
21487        }
21488    }
21489
21490    unsafe impl fidl::encoding::TypeMarker for VmoBuffer {
21491        type Owned = Self;
21492
21493        #[inline(always)]
21494        fn inline_align(_context: fidl::encoding::Context) -> usize {
21495            8
21496        }
21497
21498        #[inline(always)]
21499        fn inline_size(_context: fidl::encoding::Context) -> usize {
21500            16
21501        }
21502    }
21503
    // Encodes a `VmoBuffer` table into the FIDL wire format. Encoding is done
    // from `&mut` because the handle fields (`vmo`, `close_weak_asap`) are
    // moved out of the value via `take_or_borrow` and transferred to the
    // encoder's handle list.
    unsafe impl fidl::encoding::Encode<VmoBuffer, fidl::encoding::DefaultFuchsiaResourceDialect>
        for &mut VmoBuffer
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<VmoBuffer>(offset);
            // Vector header: a table is laid out as a vector of envelopes, so
            // write the envelope count (highest set ordinal) and the presence marker.
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            // Ordinal 1: vmo (zx VMO handle).
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                // `take_or_borrow` moves the handle out of the field.
                self.vmo.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Vmo,
                        { fidl::ObjectType::VMO.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            // Ordinal 2: vmo_usable_start (u64, a plain value — borrowed, not taken).
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.vmo_usable_start
                    .as_ref()
                    .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            // Ordinal 3: close_weak_asap (zx eventpair handle).
            if 3 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (3 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.close_weak_asap.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::EventPair,
                        { fidl::ObjectType::EVENTPAIR.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
21635
    // Decodes a `VmoBuffer` table from the FIDL wire format. The three known
    // ordinals (1 = vmo, 2 = vmo_usable_start, 3 = close_weak_asap) are decoded
    // in order; gaps and trailing unknown ordinals are skipped for forward
    // compatibility.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {
        #[inline(always)]
        fn new_empty() -> Self {
            // Start with every field absent; `decode` fills in what is on the wire.
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the envelope count (the highest ordinal present). A null
            // vector header is invalid: the table itself is not nullable.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // Ordinal 1: vmo (zx VMO handle).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot positions so the envelope's declared byte/handle counts
            // can be validated after the member is decoded.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Payloads of at most 4 bytes must be stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 2: vmo_usable_start (u64).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.vmo_usable_start.get_or_insert_with(|| {
                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u64,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 3: close_weak_asap (zx eventpair handle).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 3 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
21829}