fidl_fuchsia_sysmem2/fidl_fuchsia_sysmem2.rs
1// WARNING: This file is machine generated by fidlgen.
2
3#![warn(clippy::all)]
4#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]
5
6use bitflags::bitflags;
7use fidl::client::QueryResponseFut;
8use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
9use fidl::endpoints::{ControlHandle as _, Responder as _};
10pub use fidl_fuchsia_sysmem2__common::*;
11use futures::future::{self, MaybeDone, TryFutureExt};
12use zx_status;
13
/// Request table for `fuchsia.sysmem2/Allocator.AllocateNonSharedCollection`
/// (see [`AllocatorSynchronousProxy::allocate_non_shared_collection`]).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorAllocateNonSharedCollectionRequest {
    /// The server end of the [`fuchsia.sysmem2/BufferCollection`] to be
    /// served by sysmem on behalf of the single participant.
    pub collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: this resource table can be encoded/decoded standalone with the
// default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateNonSharedCollectionRequest
{
}
25
/// Request table for `fuchsia.sysmem2/Allocator.AllocateSharedCollection`
/// (see [`AllocatorSynchronousProxy::allocate_shared_collection`]).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorAllocateSharedCollectionRequest {
    /// The server end of the root [`fuchsia.sysmem2/BufferCollectionToken`]
    /// to be created.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateSharedCollectionRequest
{
}
37
/// Request table for `fuchsia.sysmem2/Allocator.BindSharedCollection`
/// (see [`AllocatorSynchronousProxy::bind_shared_collection`]).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorBindSharedCollectionRequest {
    /// The client end of the `BufferCollectionToken` being "turned in" in
    /// exchange for a `BufferCollection`.
    pub token: Option<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
    /// The server end of the `BufferCollection` channel; the sender retains
    /// the client end.
    pub buffer_collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorBindSharedCollectionRequest
{
}
50
/// Request table for `fuchsia.sysmem2/Allocator.GetVmoInfo`
/// (see [`AllocatorSynchronousProxy::get_vmo_info`]).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorGetVmoInfoRequest {
    /// `vmo` is required to be set; ownership is transferred to the server
    /// so in most cases a client will duplicate a handle and transfer the
    /// duplicate via this field.
    pub vmo: Option<fidl::Vmo>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoRequest
{
}
65
/// Success response table for `fuchsia.sysmem2/Allocator.GetVmoInfo`
/// (see [`AllocatorSynchronousProxy::get_vmo_info`] for full field semantics).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorGetVmoInfoResponse {
    /// The buffer collection ID, unique per logical buffer collection per boot.
    pub buffer_collection_id: Option<u64>,
    /// The index of the buffer within the buffer collection; only unique
    /// across buffers of the same collection.
    pub buffer_index: Option<u64>,
    /// Set iff the request's `vmo` was a weak sysmem VMO handle; signals
    /// `ZX_EVENTPAIR_PEER_CLOSED` when all weak handles to the buffer should
    /// be closed as soon as possible.
    pub close_weak_asap: Option<fidl::EventPair>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoResponse
{
}
79
/// Request table for `fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionAttachLifetimeTrackingRequest {
    // NOTE(review): field semantics are defined by the protocol method, which
    // is outside this chunk — see BufferCollection.AttachLifetimeTracking docs.
    pub server_end: Option<fidl::EventPair>,
    pub buffers_remaining: Option<u32>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachLifetimeTrackingRequest
{
}
92
/// Request table for `fuchsia.sysmem2/BufferCollection.AttachToken`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionAttachTokenRequest {
    /// Rights attenuation applied to the new token's VMO rights (see the
    /// rights discussion on [`BufferCollectionInfo`]'s `buffers` field).
    pub rights_attenuation_mask: Option<fidl::Rights>,
    /// The server end of the new `BufferCollectionToken` channel.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachTokenRequest
{
}
105
/// Information about a buffer collection and its buffers.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionInfo {
    /// These settings apply to all the buffers in the initial buffer
    /// allocation.
    ///
    /// This field will always be set by sysmem.
    pub settings: Option<SingleBufferSettings>,
    /// VMO handles (and vmo_usable_start offset) for each buffer in the
    /// collection.
    ///
    /// The size of this vector is the buffer_count (buffer_count is not sent
    /// separately).
    ///
    /// All buffer VMO handles have identical size and access rights. The size
    /// is in settings.buffer_settings.size_bytes.
    ///
    /// The VMO access rights are determined based on the usages which the
    /// client specified when allocating the buffer collection. For example, a
    /// client which expressed a read-only usage will receive VMOs without write
    /// rights. In addition, the rights can be attenuated by the parameter to
    /// BufferCollectionToken.Duplicate() calls.
    ///
    /// This field will always have VmoBuffer(s) in it, even if the participant
    /// specifies usage which does not require VMO handles. This permits such a
    /// participant to know the vmo_usable_start values, in case that's of any
    /// use to the participant.
    ///
    /// This field will always be set by sysmem, even if the participant doesn't
    /// specify any buffer usage (but the [`fuchsia.sysmem2/VmoBuffer.vmo`]
    /// sub-field within this field won't be set in that case).
    pub buffers: Option<Vec<VmoBuffer>>,
    /// This number is unique among all logical buffer collections per boot.
    ///
    /// This ID number will be the same for all BufferCollectionToken(s),
    /// BufferCollection(s), and BufferCollectionTokenGroup(s) associated with
    /// the same logical buffer collection (derived from the same root token
    /// created with fuchsia.sysmem2.Allocator.CreateSharedCollection, or with
    /// CreateNonSharedCollection).
    ///
    /// The same ID can be retrieved from a BufferCollectionToken,
    /// BufferCollection, or BufferCollectionTokenGroup using
    /// GetBufferCollectionId (at the cost of a round-trip to sysmem and back).
    ///
    /// This field will always be set by sysmem.
    pub buffer_collection_id: Option<u64>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for BufferCollectionInfo {}
157
/// Request table for `fuchsia.sysmem2/BufferCollection.SetConstraints`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionSetConstraintsRequest {
    /// This participant's constraints on the buffer collection.
    pub constraints: Option<BufferCollectionConstraints>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionSetConstraintsRequest
{
}
169
/// Request table for
/// `fuchsia.sysmem2/BufferCollectionToken.CreateBufferCollectionTokenGroup`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
    /// The server end of the new `BufferCollectionTokenGroup` channel.
    pub group_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
}
181
/// Request table for `fuchsia.sysmem2/BufferCollectionToken.Duplicate`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenDuplicateRequest {
    /// Rights attenuation applied to VMO rights obtained via the duplicated
    /// token (see the rights discussion on [`BufferCollectionInfo`]'s
    /// `buffers` field).
    pub rights_attenuation_mask: Option<fidl::Rights>,
    /// The server end of the duplicate `BufferCollectionToken` channel.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateRequest
{
}
194
/// Request table for `fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenGroupCreateChildRequest {
    /// Must be set.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    /// If not set, the default is `ZX_RIGHT_SAME_RIGHTS`.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildRequest
{
}
209
/// Response table for
/// `fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenGroupCreateChildrenSyncResponse {
    /// Client ends of the newly created child tokens.
    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildrenSyncResponse
{
}
221
/// Response table for `fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenDuplicateSyncResponse {
    /// Client ends of the duplicated tokens.
    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateSyncResponse
{
}
233
/// Success response table for
/// `fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionWaitForAllBuffersAllocatedResponse {
    /// The allocated collection's settings and per-buffer VMOs; see
    /// [`BufferCollectionInfo`].
    pub buffer_collection_info: Option<BufferCollectionInfo>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionWaitForAllBuffersAllocatedResponse
{
}
245
/// Request table for `fuchsia.sysmem2/Node.AttachNodeTracking`.
#[derive(Debug, Default, PartialEq)]
pub struct NodeAttachNodeTrackingRequest {
    /// This field must be set. This eventpair end will be closed after the
    /// `Node` is closed or failed and the node's buffer counts are no
    /// longer in effect in the logical buffer collection.
    pub server_end: Option<fidl::EventPair>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeAttachNodeTrackingRequest
{
}
260
/// Request table for `fuchsia.sysmem2/Node.IsAlternateFor`.
#[derive(Debug, Default, PartialEq)]
pub struct NodeIsAlternateForRequest {
    // NOTE(review): presumably an event obtained via Node.GetNodeRef
    // (see [`NodeGetNodeRefResponse`]) — confirm against the protocol docs.
    pub node_ref: Option<fidl::Event>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeIsAlternateForRequest {}
269
/// Request table for `fuchsia.sysmem2/Node.SetWeakOk`.
#[derive(Debug, Default, PartialEq)]
pub struct NodeSetWeakOkRequest {
    /// When true, the weak-OK status also applies to child nodes (see the
    /// `close_weak_asap` discussion on [`VmoBuffer`]).
    pub for_child_nodes_also: Option<bool>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeSetWeakOkRequest {}
278
/// Response table for `fuchsia.sysmem2/Node.GetNodeRef`.
#[derive(Debug, Default, PartialEq)]
pub struct NodeGetNodeRefResponse {
    /// An event identifying this `Node`; usable with
    /// [`NodeIsAlternateForRequest`].
    pub node_ref: Option<fidl::Event>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeGetNodeRefResponse {}
287
/// A single buffer (VMO handle plus metadata) within a
/// [`BufferCollectionInfo`]'s `buffers` vector.
#[derive(Debug, Default, PartialEq)]
pub struct VmoBuffer {
    /// `vmo` can be un-set if a participant has only
    /// [`fuchsia.sysmem2/BufferUsage.none`] set to `NONE_USAGE` (explicitly or
    /// implicitly by [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
    /// without `constraints` set).
    pub vmo: Option<fidl::Vmo>,
    /// Offset within the VMO of the first usable byte. Must be < the VMO's size
    /// in bytes, and leave sufficient room for BufferMemorySettings.size_bytes
    /// before the end of the VMO.
    ///
    /// Currently sysmem will always set this field to 0, and in future, sysmem
    /// won't set this field to a non-zero value unless all participants have
    /// explicitly indicated support for non-zero vmo_usable_start (this
    /// mechanism does not exist as of this comment). A participant that hasn't
    /// explicitly indicated support for non-zero vmo_usable_start (all current
    /// clients) should implicitly assume this field is set to 0 without
    /// actually checking this field.
    pub vmo_usable_start: Option<u64>,
    /// This field is set iff `vmo` is a sysmem weak VMO handle.
    ///
    /// If the client sent `SetWeakOk`, the client must keep `close_weak_asap`
    /// around for as long as `vmo`, and must notice `ZX_EVENTPAIR_PEER_CLOSED`.
    /// If that signal occurs, the client must close `vmo` asap.
    ///
    /// If the `vmo` is a sysmem weak VMO handle but the client didn't send
    /// `SetWeakOk`, this means that a holder of a parent node sent `SetWeakOk`
    /// with `for_child_nodes_also` true, and the owner of that parent node is
    /// responsible for paying attention to `close_weak_asap` and informing
    /// child token participants to close handles. In this case the participant
    /// that never sent `SetWeakOk` is allowed to retain and/or pay attention to
    /// `close_weak_asap` (to close the handle faster, or for other reasons such
    /// as diagnosing overall buffer cleanup timing), but is not required to
    /// retain or pay attention to `close_weak_asap`.
    ///
    /// If sysmem closing the sysmem end of `close_weak_asap` does not result in
    /// quick closure of all sysmem weak VMO handles to the buffer, that's
    /// considered a VMO leak, and in that case sysmem will eventually complain
    /// loudly via syslog (currently 5s later).
    pub close_weak_asap: Option<fidl::EventPair>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Marker impl: standalone encoding with the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {}
333
/// Protocol marker type for `fuchsia.sysmem2/Allocator`, tying together the
/// proxy, request-stream, and (on Fuchsia) synchronous-proxy types.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct AllocatorMarker;

impl fidl::endpoints::ProtocolMarker for AllocatorMarker {
    type Proxy = AllocatorProxy;
    type RequestStream = AllocatorRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = AllocatorSynchronousProxy;

    const DEBUG_NAME: &'static str = "fuchsia.sysmem2.Allocator";
}
// The protocol is discoverable, i.e. connectable by name via component
// namespaces.
impl fidl::endpoints::DiscoverableProtocolMarker for AllocatorMarker {}
/// Result alias for `Allocator.GetVmoInfo`.
pub type AllocatorGetVmoInfoResult = Result<AllocatorGetVmoInfoResponse, Error>;
347
/// Client-side interface for the `fuchsia.sysmem2/Allocator` protocol,
/// implemented by [`AllocatorProxy`]; also useful for mocking in tests.
/// Two-way methods return associated future types; one-way methods return
/// `Result<(), fidl::Error>` immediately after queueing the message.
pub trait AllocatorProxyInterface: Send + Sync {
    fn r#allocate_non_shared_collection(
        &self,
        payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    fn r#allocate_shared_collection(
        &self,
        payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    fn r#bind_shared_collection(
        &self,
        payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    type ValidateBufferCollectionTokenResponseFut: std::future::Future<
            Output = Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error>,
        > + Send;
    fn r#validate_buffer_collection_token(
        &self,
        payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> Self::ValidateBufferCollectionTokenResponseFut;
    fn r#set_debug_client_info(
        &self,
        payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    type GetVmoInfoResponseFut: std::future::Future<Output = Result<AllocatorGetVmoInfoResult, fidl::Error>>
        + Send;
    fn r#get_vmo_info(&self, payload: AllocatorGetVmoInfoRequest) -> Self::GetVmoInfoResponseFut;
}
/// Synchronous (blocking) client for `fuchsia.sysmem2/Allocator`.
/// Only available on Fuchsia targets; async code should use
/// [`AllocatorProxy`] instead.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct AllocatorSynchronousProxy {
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for AllocatorSynchronousProxy {
    type Proxy = AllocatorProxy;
    type Protocol = AllocatorMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
399
// Method implementations for the synchronous Allocator client. The hex
// literals passed to `send`/`send_query` are the fidlgen-generated method
// ordinals and must not be edited by hand; all messages are sent with
// FLEXIBLE dynamic flags.
#[cfg(target_os = "fuchsia")]
impl AllocatorSynchronousProxy {
    /// Creates a synchronous proxy over the client end of an `Allocator`
    /// channel.
    pub fn new(channel: fidl::Channel) -> Self {
        Self { client: fidl::client::sync::Client::new(channel) }
    }

    /// Consumes the proxy and returns the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorEvent, fidl::Error> {
        AllocatorEvent::decode(self.client.wait_for_event::<AllocatorMarker>(deadline)?)
    }

    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes. This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    pub fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
            &mut payload,
            0x5ca681f025a80e44,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2.BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    pub fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateSharedCollectionRequest>(
            &mut payload,
            0x11a19ff51f0b49c1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel. The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    pub fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorBindSharedCollectionRequest>(
            &mut payload,
            0x550916b0dc1d5b4e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel. This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    pub fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
        let _response = self.client.send_query::<
            AllocatorValidateBufferCollectionTokenRequest,
            fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
            AllocatorMarker,
        >(
            payload,
            0x4c5ee91b02a7e68d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
        Ok(_response)
    }

    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorSetDebugClientInfoRequest>(
            payload,
            0x6f68f19a3f509c4d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    pub fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
        let _response = self.client.send_query::<
            AllocatorGetVmoInfoRequest,
            fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
            AllocatorMarker,
        >(
            &mut payload,
            0x21a881120aa0ddf9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<AllocatorMarker>("get_vmo_info")?;
        Ok(_response.map(|x| x))
    }
}
686
// Consuming conversion: proxy -> raw handle (via the underlying channel).
#[cfg(target_os = "fuchsia")]
impl From<AllocatorSynchronousProxy> for zx::NullableHandle {
    fn from(value: AllocatorSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}

// Conversion: raw channel -> proxy.
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for AllocatorSynchronousProxy {
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}

// Conversion: typed client end -> proxy.
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for AllocatorSynchronousProxy {
    type Protocol = AllocatorMarker;

    fn from_client(value: fidl::endpoints::ClientEnd<AllocatorMarker>) -> Self {
        Self::new(value.into_channel())
    }
}
709
/// Asynchronous client for `fuchsia.sysmem2/Allocator`. Cloning the proxy
/// clones the underlying client handle; both clones speak over the same
/// channel.
#[derive(Debug, Clone)]
pub struct AllocatorProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl fidl::endpoints::Proxy for AllocatorProxy {
    type Protocol = AllocatorMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    // Fails (returning self) if the channel is shared, e.g. by an outstanding
    // clone or in-flight request.
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
730
// Inherent async methods. Every protocol method below simply forwards to the
// `AllocatorProxyInterface` implementation, which performs the actual FIDL
// encode/send work.
impl AllocatorProxy {
    /// Create a new Proxy for fuchsia.sysmem2/Allocator.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> AllocatorEventStream {
        AllocatorEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes. This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    pub fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call; delegated to the trait impl.
        AllocatorProxyInterface::r#allocate_non_shared_collection(self, payload)
    }

    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2.BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    pub fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call; delegated to the trait impl.
        AllocatorProxyInterface::r#allocate_shared_collection(self, payload)
    }

    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel. The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    pub fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call; delegated to the trait impl.
        AllocatorProxyInterface::r#bind_shared_collection(self, payload)
    }

    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel. This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    pub fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> fidl::client::QueryResponseFut<
        AllocatorValidateBufferCollectionTokenResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call; the returned future resolves with the decoded reply.
        AllocatorProxyInterface::r#validate_buffer_collection_token(self, payload)
    }

    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call; delegated to the trait impl.
        AllocatorProxyInterface::r#set_debug_client_info(self, payload)
    }

    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    pub fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
    ) -> fidl::client::QueryResponseFut<
        AllocatorGetVmoInfoResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call; the returned future resolves with the decoded result.
        AllocatorProxyInterface::r#get_vmo_info(self, payload)
    }
}
980
// Wire-level implementation. The hex literals are the FIDL method ordinals;
// they must match the server side exactly. All methods are sent with the
// FLEXIBLE dynamic flag, allowing unknown-method interactions.
impl AllocatorProxyInterface for AllocatorProxy {
    // One-way: encode the resource payload and send; no reply is awaited.
    fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
            &mut payload,
            0x5ca681f025a80e44,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way: encode the resource payload and send; no reply is awaited.
    fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateSharedCollectionRequest>(
            &mut payload,
            0x11a19ff51f0b49c1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way: encode the resource payload (token + server end) and send.
    fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorBindSharedCollectionRequest>(
            &mut payload,
            0x550916b0dc1d5b4e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type ValidateBufferCollectionTokenResponseFut = fidl::client::QueryResponseFut<
        AllocatorValidateBufferCollectionTokenResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way: send the request and decode the flexible response envelope.
    fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> Self::ValidateBufferCollectionTokenResponseFut {
        // Decodes the reply body for ordinal 0x4c5ee91b02a7e68d, unwrapping
        // the FLEXIBLE result envelope into the typed response.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x4c5ee91b02a7e68d,
            >(_buf?)?
            .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            AllocatorValidateBufferCollectionTokenRequest,
            AllocatorValidateBufferCollectionTokenResponse,
        >(
            payload,
            0x4c5ee91b02a7e68d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way: the payload is a value (non-resource) table, sent by reference.
    fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorSetDebugClientInfoRequest>(
            payload,
            0x6f68f19a3f509c4d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetVmoInfoResponseFut = fidl::client::QueryResponseFut<
        AllocatorGetVmoInfoResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way: send the request (with VMO handle) and decode the result.
    fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
    ) -> Self::GetVmoInfoResponseFut {
        // Decodes the reply body for ordinal 0x21a881120aa0ddf9; the reply is
        // a FLEXIBLE result type carrying either the response or an `Error`.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x21a881120aa0ddf9,
            >(_buf?)?
            .into_result::<AllocatorMarker>("get_vmo_info")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResult>(
            &mut payload,
            0x21a881120aa0ddf9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
}
1083
/// Stream of events arriving from the server end of fuchsia.sysmem2/Allocator.
///
/// Obtained via [`AllocatorProxy::take_event_stream`].
pub struct AllocatorEventStream {
    // Receiver side of the proxy client's event pipeline.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
1087
// Allows `poll_next` to access the stream through `Pin<&mut Self>` without
// structural pinning of the inner receiver.
impl std::marker::Unpin for AllocatorEventStream {}
1089
impl futures::stream::FusedStream for AllocatorEventStream {
    /// Reports whether the underlying event receiver has terminated, i.e. the
    /// stream will yield no further events.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
1095
impl futures::Stream for AllocatorEventStream {
    type Item = Result<AllocatorEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // Poll the raw receiver; `?` short-circuits, surfacing a transport
        // error as the next stream item.
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            // A raw message buffer arrived: decode it into a typed event.
            Some(buf) => std::task::Poll::Ready(Some(AllocatorEvent::decode(buf))),
            // Receiver exhausted: the event stream is finished.
            None => std::task::Poll::Ready(None),
        }
    }
}
1112
/// An event received from the fuchsia.sysmem2/Allocator protocol.
///
/// The protocol defines no events of its own, so the only variant is the
/// catch-all for unknown flexible events.
#[derive(Debug)]
pub enum AllocatorEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
1121
impl AllocatorEvent {
    /// Decodes a message buffer as a [`AllocatorEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<AllocatorEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events are unsolicited server->client messages and carry no
        // transaction id.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // No events are declared for this protocol, so any FLEXIBLE-flagged
            // ordinal is surfaced as an unknown event rather than an error.
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(AllocatorEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            // A strict (non-flexible) unknown ordinal is a protocol violation.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
1141
/// A Stream of incoming requests for fuchsia.sysmem2/Allocator.
pub struct AllocatorRequestStream {
    // Shared server-side state (channel + shutdown signalling); also cloned
    // into every control handle and responder produced by this stream.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel closes or shutdown is observed; polling after
    // termination panics.
    is_terminated: bool,
}
1147
// Allows `poll_next` to access the stream through `Pin<&mut Self>` without
// structural pinning of the inner state.
impl std::marker::Unpin for AllocatorRequestStream {}
1149
impl futures::stream::FusedStream for AllocatorRequestStream {
    /// Reports whether the stream has terminated (channel closed or shutdown
    /// requested); once true, `poll_next` must not be called again.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
1155
impl fidl::endpoints::RequestStream for AllocatorRequestStream {
    type Protocol = AllocatorMarker;
    type ControlHandle = AllocatorControlHandle;

    /// Wraps a server channel in a request stream ready for polling.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    /// Returns a control handle sharing this stream's connection state.
    fn control_handle(&self) -> Self::ControlHandle {
        AllocatorControlHandle { inner: self.inner.clone() }
    }

    /// Splits the stream into its shared serve state and termination flag.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    /// Reassembles a stream from the parts produced by `into_inner`.
    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
1182
impl futures::Stream for AllocatorRequestStream {
    type Item = Result<AllocatorRequest, fidl::Error>;

    // Reads one message from the channel, decodes it, and dispatches on the
    // method ordinal to produce a typed `AllocatorRequest`.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // A requested shutdown ends the stream cleanly.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled AllocatorRequestStream after completion");
        }
        // Decode into thread-local scratch buffers to avoid per-message
        // allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure terminates the stream without an error item.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Allocator.AllocateNonSharedCollection (one-way)
                    0x5ca681f025a80e44 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorAllocateNonSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateNonSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::AllocateNonSharedCollection {
                            payload: req,
                            control_handle,
                        })
                    }
                    // Allocator.AllocateSharedCollection (one-way)
                    0x11a19ff51f0b49c1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorAllocateSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::AllocateSharedCollection {
                            payload: req,
                            control_handle,
                        })
                    }
                    // Allocator.BindSharedCollection (one-way)
                    0x550916b0dc1d5b4e => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorBindSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorBindSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::BindSharedCollection { payload: req, control_handle })
                    }
                    // Allocator.ValidateBufferCollectionToken (two-way); the
                    // responder carries the tx_id needed to route the reply.
                    0x4c5ee91b02a7e68d => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorValidateBufferCollectionTokenRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorValidateBufferCollectionTokenRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::ValidateBufferCollectionToken {
                            payload: req,
                            responder: AllocatorValidateBufferCollectionTokenResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Allocator.SetDebugClientInfo (one-way)
                    0x6f68f19a3f509c4d => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorSetDebugClientInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::SetDebugClientInfo { payload: req, control_handle })
                    }
                    // Allocator.GetVmoInfo (two-way)
                    0x21a881120aa0ddf9 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorGetVmoInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorGetVmoInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::GetVmoInfo {
                            payload: req,
                            responder: AllocatorGetVmoInfoResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Unknown FLEXIBLE one-way method (tx_id == 0): surface it
                    // to the server instead of failing the connection.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(AllocatorRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown FLEXIBLE two-way method: reply with a framework
                    // error so the caller's pending call completes, then
                    // surface the unknown method to the server.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(AllocatorRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
1334
1335/// Allocates system memory buffers.
1336///
1337/// Epitaphs are not used in this protocol.
1338#[derive(Debug)]
1339pub enum AllocatorRequest {
1340 /// Allocates a buffer collection on behalf of a single client (aka
1341 /// initiator) who is also the only participant (from the point of view of
1342 /// sysmem).
1343 ///
1344 /// This call exists mainly for temp/testing purposes. This call skips the
1345 /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
1346 /// allow another participant to specify its constraints.
1347 ///
1348 /// Real clients are encouraged to use
1349 /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
1350 /// let relevant participants directly convey their own constraints to
1351 /// sysmem by sending `BufferCollectionToken`s to those participants.
1352 ///
1353 /// + request `collection_request` The server end of the
1354 /// [`fuchsia.sysmem2/BufferCollection`].
1355 AllocateNonSharedCollection {
1356 payload: AllocatorAllocateNonSharedCollectionRequest,
1357 control_handle: AllocatorControlHandle,
1358 },
1359 /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
1360 ///
1361 /// The `BufferCollectionToken` can be "duplicated" for distribution to
1362 /// participants by using
1363 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
1364 /// `BufferCollectionToken` can be converted into a
1365 /// [`fuchsia.sysmem2.BufferCollection`] using
1366 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
1367 ///
1368 /// Buffer constraints can be set via
1369 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
1370 ///
1371 /// Success/failure to populate the buffer collection with buffers can be
1372 /// determined from
1373 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
1374 ///
1375 /// Closing the client end of a `BufferCollectionToken` or
1376 /// `BufferCollection` (without `Release` first) will fail all client ends
1377 /// in the same failure domain, which by default is all client ends of the
1378 /// buffer collection. See
1379 /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
1380 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
1381 /// separate failure domains within a buffer collection.
1382 AllocateSharedCollection {
1383 payload: AllocatorAllocateSharedCollectionRequest,
1384 control_handle: AllocatorControlHandle,
1385 },
1386 /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
1387 /// [`fuchsia.sysmem2/BufferCollection`].
1388 ///
1389 /// At the time of sending this message, the buffer collection hasn't yet
1390 /// been populated with buffers - the participant must first also send
1391 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
1392 /// `BufferCollection` client end.
1393 ///
1394 /// All `BufferCollectionToken`(s) duplicated from a root
1395 /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
1396 /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
1397 /// existing `BufferCollection` client ends must have sent `SetConstraints`
1398 /// before the logical BufferCollection will be populated with buffers (or
1399 /// will fail if the overall set of constraints can't be satisfied).
1400 ///
1401 /// + request `token` The client endpoint of a channel whose server end was
1402 /// sent to sysmem using
1403 /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
1404 /// end was sent to sysmem using
1405 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. The token is
1406 /// being "turned in" in exchange for a
1407 /// [`fuchsia.sysmem2/BufferCollection`].
1408 /// + request `buffer_collection_request` The server end of a
1409 /// [`fuchsia.sysmem2/BufferCollection`] channel. The sender retains the
1410 /// client end. The `BufferCollection` channel is a single participant's
1411 /// connection to the logical buffer collection. Typically there will be
1412 /// other participants with their own `BufferCollection` channel to the
1413 /// logical buffer collection.
1414 BindSharedCollection {
1415 payload: AllocatorBindSharedCollectionRequest,
1416 control_handle: AllocatorControlHandle,
1417 },
1418 /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
1419 /// the sysmem server.
1420 ///
1421 /// With this call, the client can determine whether an incoming token is a
1422 /// real sysmem token that is known to the sysmem server, without any risk
1423 /// of getting stuck waiting forever on a potentially fake token to complete
1424 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
1425 /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
1426 /// FIDL message). In cases where the client trusts the source of the token
1427 /// to provide a real token, this call is not typically needed outside of
1428 /// debugging.
1429 ///
1430 /// If the validate fails sometimes but succeeds other times, the source of
1431 /// the token may itself not be calling
1432 /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
1433 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
1434 /// token but before sending the token to the current client. It may be more
1435 /// convenient for the source to use
1436 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
1437 /// token(s), since that call has the sync step built in. Or, the buffer
1438 /// collection may be failing before this call is processed by the sysmem
1439 /// server, as buffer collection failure cleans up sysmem's tracking of
1440 /// associated tokens.
1441 ///
1442 /// This call has no effect on any token.
1443 ///
1444 /// + request `token_server_koid` The koid of the server end of a channel
1445 /// that might be a BufferCollectionToken channel. This can be obtained
1446 /// via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
1447 /// - response `is_known` true means sysmem knew of the token at the time
1448 /// sysmem processed the request, but doesn't guarantee that the token is
1449 /// still valid by the time the client receives the reply. What it does
1450 /// guarantee is that the token at least was a real token, so a two-way
1451 /// call to the token won't stall forever (will fail or succeed fairly
1452 /// quickly, not stall). This can already be known implicitly if the
1453 /// source of the token can be trusted to provide a real token. A false
1454 /// value means the token wasn't known to sysmem at the time sysmem
1455 /// processed this call, but the token may have previously been valid, or
1456 /// may yet become valid. Or if the sender of the token isn't trusted to
1457 /// provide a real token, the token may be fake. It's the responsibility
1458 /// of the sender to sync with sysmem to ensure that previously
1459 /// created/duplicated token(s) are known to sysmem, before sending the
1460 /// token(s) to other participants.
1461 ValidateBufferCollectionToken {
1462 payload: AllocatorValidateBufferCollectionTokenRequest,
1463 responder: AllocatorValidateBufferCollectionTokenResponder,
1464 },
1465 /// Set information about the current client that can be used by sysmem to
1466 /// help diagnose leaking memory and allocation stalls waiting for a
1467 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
1468 ///
1469 /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
1471 /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
1472 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
1473 /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
1474 /// these `BufferCollection`(s) have the same initial debug client info as
1475 /// the token turned in to create the `BufferCollection`).
1476 ///
1477 /// This info can be subsequently overridden on a per-`Node` basis by
1478 /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
1479 ///
1480 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
1481 /// `Allocator` is the most efficient way to ensure that all
1482 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
1483 /// set, and is also more efficient than separately sending the same debug
1484 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
1485 /// created [`fuchsia.sysmem2/Node`].
1486 ///
1487 /// + request `name` This can be an arbitrary string, but the current
1488 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
1489 /// + request `id` This can be an arbitrary id, but the current process ID
1490 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
1491 SetDebugClientInfo {
1492 payload: AllocatorSetDebugClientInfoRequest,
1493 control_handle: AllocatorControlHandle,
1494 },
1495 /// Given a handle to a sysmem-provided VMO, this returns additional info
1496 /// about the corresponding sysmem logical buffer.
1497 ///
1498 /// Most callers will duplicate a VMO handle first and send the duplicate to
1499 /// this call.
1500 ///
1501 /// If the client has created a child VMO of a sysmem-provided VMO, that
1502 /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
1503 ///
1504 /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
1505 /// - response `buffer_collection_id` The buffer collection ID, which is
1506 /// unique per logical buffer collection per boot.
1507 /// - response `buffer_index` The buffer index of the buffer within the
1508 /// buffer collection. This is the same as the index of the buffer within
1509 /// [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
1510 /// is the same for all sysmem-delivered VMOs corresponding to the same
1511 /// logical buffer, even if the VMO koids differ. The `buffer_index` is
1512 /// only unique across buffers of a buffer collection. For a given buffer,
1513 /// the combination of `buffer_collection_id` and `buffer_index` is unique
1514 /// per boot.
1515 /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
1516 /// the `close_weak_asap` field will be set in the response. This handle
1517 /// will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
1518 /// the buffer should be closed as soon as possible. This is signalled
1519 /// shortly after all strong sysmem VMOs to the buffer are closed
1520 /// (including any held indirectly via strong `BufferCollectionToken` or
1521 /// strong `BufferCollection`). Failure to close all weak sysmem VMO
1522 /// handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
1523 /// considered a VMO leak caused by the client still holding a weak sysmem
1524 /// VMO handle and results in loud complaints to the log by sysmem. The
1525 /// buffers of a collection can be freed independently of each other. The
1526 /// `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
1527 /// response arrives at the client. A client that isn't prepared to handle
1528 /// weak sysmem VMOs, on seeing this field set, can close all handles to
1529 /// the buffer and fail any associated request.
1530 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
1531 /// VMO. Both strong and weak sysmem VMOs can be passed to this call, and
1532 /// the VMO handle passed in to this call itself keeps the VMO's info
1533 /// alive for purposes of responding to this call. Because of this,
1534 /// ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
1535 /// handles to the VMO when calling; even if other handles are closed
1536 /// before the GetVmoInfo response arrives at the client).
1537 /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
1538 /// capable of being used with GetVmoInfo due to rights/capability
1539 /// attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
1540 /// topic [`ZX_INFO_HANDLE_BASIC`].
1541 /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
1542 /// unspecified reason. See the log for more info.
1543 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
1544 /// wasn't set, or there was some other problem with the request field(s).
1545 GetVmoInfo { payload: AllocatorGetVmoInfoRequest, responder: AllocatorGetVmoInfoResponder },
1546 /// An interaction was received which does not match any known method.
1547 #[non_exhaustive]
1548 _UnknownMethod {
1549 /// Ordinal of the method that was called.
1550 ordinal: u64,
1551 control_handle: AllocatorControlHandle,
1552 method_type: fidl::MethodType,
1553 },
1554}
1555
1556impl AllocatorRequest {
1557 #[allow(irrefutable_let_patterns)]
1558 pub fn into_allocate_non_shared_collection(
1559 self,
1560 ) -> Option<(AllocatorAllocateNonSharedCollectionRequest, AllocatorControlHandle)> {
1561 if let AllocatorRequest::AllocateNonSharedCollection { payload, control_handle } = self {
1562 Some((payload, control_handle))
1563 } else {
1564 None
1565 }
1566 }
1567
1568 #[allow(irrefutable_let_patterns)]
1569 pub fn into_allocate_shared_collection(
1570 self,
1571 ) -> Option<(AllocatorAllocateSharedCollectionRequest, AllocatorControlHandle)> {
1572 if let AllocatorRequest::AllocateSharedCollection { payload, control_handle } = self {
1573 Some((payload, control_handle))
1574 } else {
1575 None
1576 }
1577 }
1578
1579 #[allow(irrefutable_let_patterns)]
1580 pub fn into_bind_shared_collection(
1581 self,
1582 ) -> Option<(AllocatorBindSharedCollectionRequest, AllocatorControlHandle)> {
1583 if let AllocatorRequest::BindSharedCollection { payload, control_handle } = self {
1584 Some((payload, control_handle))
1585 } else {
1586 None
1587 }
1588 }
1589
1590 #[allow(irrefutable_let_patterns)]
1591 pub fn into_validate_buffer_collection_token(
1592 self,
1593 ) -> Option<(
1594 AllocatorValidateBufferCollectionTokenRequest,
1595 AllocatorValidateBufferCollectionTokenResponder,
1596 )> {
1597 if let AllocatorRequest::ValidateBufferCollectionToken { payload, responder } = self {
1598 Some((payload, responder))
1599 } else {
1600 None
1601 }
1602 }
1603
1604 #[allow(irrefutable_let_patterns)]
1605 pub fn into_set_debug_client_info(
1606 self,
1607 ) -> Option<(AllocatorSetDebugClientInfoRequest, AllocatorControlHandle)> {
1608 if let AllocatorRequest::SetDebugClientInfo { payload, control_handle } = self {
1609 Some((payload, control_handle))
1610 } else {
1611 None
1612 }
1613 }
1614
1615 #[allow(irrefutable_let_patterns)]
1616 pub fn into_get_vmo_info(
1617 self,
1618 ) -> Option<(AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResponder)> {
1619 if let AllocatorRequest::GetVmoInfo { payload, responder } = self {
1620 Some((payload, responder))
1621 } else {
1622 None
1623 }
1624 }
1625
1626 /// Name of the method defined in FIDL
1627 pub fn method_name(&self) -> &'static str {
1628 match *self {
1629 AllocatorRequest::AllocateNonSharedCollection { .. } => {
1630 "allocate_non_shared_collection"
1631 }
1632 AllocatorRequest::AllocateSharedCollection { .. } => "allocate_shared_collection",
1633 AllocatorRequest::BindSharedCollection { .. } => "bind_shared_collection",
1634 AllocatorRequest::ValidateBufferCollectionToken { .. } => {
1635 "validate_buffer_collection_token"
1636 }
1637 AllocatorRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
1638 AllocatorRequest::GetVmoInfo { .. } => "get_vmo_info",
1639 AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
1640 "unknown one-way method"
1641 }
1642 AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
1643 "unknown two-way method"
1644 }
1645 }
1646 }
1647}
1648
/// Cloneable server-side handle to an `Allocator` connection's channel,
/// usable independently of any single in-flight request (see the
/// `ControlHandle` impl below for shutdown/epitaph/signal operations).
#[derive(Debug, Clone)]
pub struct AllocatorControlHandle {
    // Shared serving state; the `Arc` is what makes cloning cheap and lets
    // responders hold a copy of the same connection state.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
1653
impl fidl::endpoints::ControlHandle for AllocatorControlHandle {
    // Every operation delegates to the shared `ServeInner` or its channel.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Peer signaling requires a real zircon channel, hence Fuchsia-only.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
1680
// NOTE(review): intentionally empty — no inherent methods are generated here
// (presumably the Allocator protocol defines no events to send; confirm
// against the FIDL definition).
impl AllocatorControlHandle {}
1682
/// One-shot responder for the `ValidateBufferCollectionToken` two-way call.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct AllocatorValidateBufferCollectionTokenResponder {
    // `ManuallyDrop` lets `drop_without_shutdown` release the handle without
    // running this type's `Drop` impl (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
    // Transaction id of the request this responder must answer.
    tx_id: u32,
}
1689
/// Sets the channel to be shutdown (see [`AllocatorControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for AllocatorValidateBufferCollectionTokenResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
1700
impl fidl::endpoints::Responder for AllocatorValidateBufferCollectionTokenResponder {
    type ControlHandle = AllocatorControlHandle;

    // Borrow the control handle without consuming the responder.
    fn control_handle(&self) -> &AllocatorControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
1715
1716impl AllocatorValidateBufferCollectionTokenResponder {
1717 /// Sends a response to the FIDL transaction.
1718 ///
1719 /// Sets the channel to shutdown if an error occurs.
1720 pub fn send(
1721 self,
1722 mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1723 ) -> Result<(), fidl::Error> {
1724 let _result = self.send_raw(payload);
1725 if _result.is_err() {
1726 self.control_handle.shutdown();
1727 }
1728 self.drop_without_shutdown();
1729 _result
1730 }
1731
1732 /// Similar to "send" but does not shutdown the channel if an error occurs.
1733 pub fn send_no_shutdown_on_err(
1734 self,
1735 mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1736 ) -> Result<(), fidl::Error> {
1737 let _result = self.send_raw(payload);
1738 self.drop_without_shutdown();
1739 _result
1740 }
1741
1742 fn send_raw(
1743 &self,
1744 mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1745 ) -> Result<(), fidl::Error> {
1746 self.control_handle.inner.send::<fidl::encoding::FlexibleType<
1747 AllocatorValidateBufferCollectionTokenResponse,
1748 >>(
1749 fidl::encoding::Flexible::new(payload),
1750 self.tx_id,
1751 0x4c5ee91b02a7e68d,
1752 fidl::encoding::DynamicFlags::FLEXIBLE,
1753 )
1754 }
1755}
1756
/// One-shot responder for the `GetVmoInfo` two-way call.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct AllocatorGetVmoInfoResponder {
    // `ManuallyDrop` lets `drop_without_shutdown` release the handle without
    // running this type's `Drop` impl (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
    // Transaction id of the request this responder must answer.
    tx_id: u32,
}
1763
/// Sets the channel to be shutdown (see [`AllocatorControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for AllocatorGetVmoInfoResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
1774
impl fidl::endpoints::Responder for AllocatorGetVmoInfoResponder {
    type ControlHandle = AllocatorControlHandle;

    // Borrow the control handle without consuming the responder.
    fn control_handle(&self) -> &AllocatorControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
1789
1790impl AllocatorGetVmoInfoResponder {
1791 /// Sends a response to the FIDL transaction.
1792 ///
1793 /// Sets the channel to shutdown if an error occurs.
1794 pub fn send(
1795 self,
1796 mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1797 ) -> Result<(), fidl::Error> {
1798 let _result = self.send_raw(result);
1799 if _result.is_err() {
1800 self.control_handle.shutdown();
1801 }
1802 self.drop_without_shutdown();
1803 _result
1804 }
1805
1806 /// Similar to "send" but does not shutdown the channel if an error occurs.
1807 pub fn send_no_shutdown_on_err(
1808 self,
1809 mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1810 ) -> Result<(), fidl::Error> {
1811 let _result = self.send_raw(result);
1812 self.drop_without_shutdown();
1813 _result
1814 }
1815
1816 fn send_raw(
1817 &self,
1818 mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1819 ) -> Result<(), fidl::Error> {
1820 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
1821 AllocatorGetVmoInfoResponse,
1822 Error,
1823 >>(
1824 fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
1825 self.tx_id,
1826 0x21a881120aa0ddf9,
1827 fidl::encoding::DynamicFlags::FLEXIBLE,
1828 )
1829 }
1830}
1831
/// Zero-sized marker identifying the `BufferCollection` protocol at the type
/// level (see the `ProtocolMarker` impl below).
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionMarker;
1834
impl fidl::endpoints::ProtocolMarker for BufferCollectionMarker {
    type Proxy = BufferCollectionProxy;
    type RequestStream = BufferCollectionRequestStream;
    // The synchronous proxy blocks on a zircon channel, hence Fuchsia-only.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionSynchronousProxy;

    // Name used in debug and error messages for this protocol.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollection";
}
/// Result type of `BufferCollection.WaitForAllBuffersAllocated`.
pub type BufferCollectionWaitForAllBuffersAllocatedResult =
    Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>;
/// Result type of `BufferCollection.CheckAllBuffersAllocated`.
pub type BufferCollectionCheckAllBuffersAllocatedResult = Result<(), Error>;
1846
/// Client-side interface for the `BufferCollection` protocol: one method per
/// FIDL message. Two-way calls return associated future types; one-way calls
/// return `Result<(), fidl::Error>` immediately after the message is sent.
pub trait BufferCollectionProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_constraints(
        &self,
        payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error>;
    type WaitForAllBuffersAllocatedResponseFut: std::future::Future<
            Output = Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error>,
        > + Send;
    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut;
    type CheckAllBuffersAllocatedResponseFut: std::future::Future<
            Output = Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error>,
        > + Send;
    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut;
    fn r#attach_token(
        &self,
        payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error>;
    fn r#attach_lifetime_tracking(
        &self,
        payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client for the `BufferCollection` protocol;
/// Fuchsia-only because it blocks on a zircon channel.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionSynchronousProxy {
    client: fidl::client::sync::Client,
}
1905
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionSynchronousProxy {
    type Proxy = BufferCollectionProxy;
    type Protocol = BufferCollectionMarker;

    // Conversions between the proxy and its underlying channel.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
1923
1924#[cfg(target_os = "fuchsia")]
1925impl BufferCollectionSynchronousProxy {
    /// Creates a synchronous proxy that speaks `BufferCollection` over the
    /// given channel.
    pub fn new(channel: fidl::Channel) -> Self {
        Self { client: fidl::client::sync::Client::new(channel) }
    }
1929
    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }
1933
    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<BufferCollectionEvent, fidl::Error> {
        // Block until a message arrives (or `deadline` passes), then decode
        // it into the typed event enum.
        BufferCollectionEvent::decode(
            self.client.wait_for_event::<BufferCollectionMarker>(deadline)?,
        )
    }
1944
1945 /// Ensure that previous messages have been received server side. This is
1946 /// particularly useful after previous messages that created new tokens,
1947 /// because a token must be known to the sysmem server before sending the
1948 /// token to another participant.
1949 ///
1950 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
1951 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
1952 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
1953 /// to mitigate the possibility of a hostile/fake
1954 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
1955 /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
1957 /// the token as part of exchanging it for a
1958 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
1959 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
1960 /// of stalling.
1961 ///
1962 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
1963 /// and then starting and completing a `Sync`, it's then safe to send the
1964 /// `BufferCollectionToken` client ends to other participants knowing the
1965 /// server will recognize the tokens when they're sent by the other
1966 /// participants to sysmem in a
1967 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
1968 /// efficient way to create tokens while avoiding unnecessary round trips.
1969 ///
1970 /// Other options include waiting for each
1971 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
1972 /// individually (using separate call to `Sync` after each), or calling
1973 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
1974 /// converted to a `BufferCollection` via
1975 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
1976 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
1977 /// the sync step and can create multiple tokens at once.
    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
        // Two-way call with empty request and response payloads; receiving
        // the reply is what provides the synchronization point.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
            BufferCollectionMarker,
        >(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionMarker>("sync")?;
        Ok(_response)
    }
1992
1993 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
1994 ///
1995 /// Normally a participant will convert a `BufferCollectionToken` into a
1996 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
1997 /// `Release` via the token (and then close the channel immediately or
1998 /// shortly later in response to server closing the server end), which
1999 /// avoids causing buffer collection failure. Without a prior `Release`,
2000 /// closing the `BufferCollectionToken` client end will cause buffer
2001 /// collection failure.
2002 ///
2003 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
2004 ///
2005 /// By default the server handles unexpected closure of a
2006 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
2007 /// first) by failing the buffer collection. Partly this is to expedite
2008 /// closing VMO handles to reclaim memory when any participant fails. If a
2009 /// participant would like to cleanly close a `BufferCollection` without
2010 /// causing buffer collection failure, the participant can send `Release`
2011 /// before closing the `BufferCollection` client end. The `Release` can
2012 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
2013 /// buffer collection won't require constraints from this node in order to
2014 /// allocate. If after `SetConstraints`, the constraints are retained and
2015 /// aggregated, despite the lack of `BufferCollection` connection at the
2016 /// time of constraints aggregation.
2017 ///
2018 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
2019 ///
2020 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
2021 /// end (without `Release` first) will trigger failure of the buffer
2022 /// collection. To close a `BufferCollectionTokenGroup` channel without
2023 /// failing the buffer collection, ensure that AllChildrenPresent() has been
2024 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
2025 /// client end.
2026 ///
2027 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
2029 /// buffer collection will fail (triggered by reception of `Release` without
2030 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
2031 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
2032 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
2033 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
2034 /// close requires `AllChildrenPresent` (if not already sent), then
2035 /// `Release`, then close client end.
2036 ///
2037 /// If `Release` occurs after `AllChildrenPresent`, the children and all
2038 /// their constraints remain intact (just as they would if the
2039 /// `BufferCollectionTokenGroup` channel had remained open), and the client
2040 /// end close doesn't trigger buffer collection failure.
2041 ///
2042 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
2043 ///
2044 /// For brevity, the per-channel-protocol paragraphs above ignore the
2045 /// separate failure domain created by
2046 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
2047 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
2048 /// unexpectedly closes (without `Release` first) and that client end is
2049 /// under a failure domain, instead of failing the whole buffer collection,
2050 /// the failure domain is failed, but the buffer collection itself is
2051 /// isolated from failure of the failure domain. Such failure domains can be
2052 /// nested, in which case only the inner-most failure domain in which the
2053 /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way message with an empty payload; no reply is awaited.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2061
2062 /// Set a name for VMOs in this buffer collection.
2063 ///
2064 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
2065 /// will be truncated to fit. The name of the vmo will be suffixed with the
2066 /// buffer index within the collection (if the suffix fits within
2067 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
2068 /// listed in the inspect data.
2069 ///
2070 /// The name only affects VMOs allocated after the name is set; this call
2071 /// does not rename existing VMOs. If multiple clients set different names
2072 /// then the larger priority value will win. Setting a new name with the
2073 /// same priority as a prior name doesn't change the name.
2074 ///
2075 /// All table fields are currently required.
2076 ///
2077 /// + request `priority` The name is only set if this is the first `SetName`
2078 /// or if `priority` is greater than any previous `priority` value in
2079 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
2080 /// + request `name` The name for VMOs created under this buffer collection.
    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        // One-way message; no reply is awaited.
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2088
2089 /// Set information about the current client that can be used by sysmem to
2090 /// help diagnose leaking memory and allocation stalls waiting for a
2091 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
2092 ///
2093 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
2095 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
2096 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
2097 ///
2098 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
2099 /// `Allocator` is the most efficient way to ensure that all
2100 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
2101 /// set, and is also more efficient than separately sending the same debug
2102 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
2103 /// created [`fuchsia.sysmem2/Node`].
2104 ///
2105 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
2106 /// indicate which client is closing their channel first, leading to subtree
2107 /// failure (which can be normal if the purpose of the subtree is over, but
2108 /// if happening earlier than expected, the client-channel-specific name can
2109 /// help diagnose where the failure is first coming from, from sysmem's
2110 /// point of view).
2111 ///
2112 /// All table fields are currently required.
2113 ///
2114 /// + request `name` This can be an arbitrary string, but the current
2115 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
2116 /// + request `id` This can be an arbitrary id, but the current process ID
2117 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; no reply is awaited.
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2128
2129 /// Sysmem logs a warning if sysmem hasn't seen
2130 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
2131 /// within 5 seconds after creation of a new collection.
2132 ///
2133 /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
2135 /// take effect.
2136 ///
2137 /// In most cases the default works well.
2138 ///
2139 /// All table fields are currently required.
2140 ///
2141 /// + request `deadline` The time at which sysmem will start trying to log
2142 /// the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; no reply is awaited.
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2153
2154 /// This enables verbose logging for the buffer collection.
2155 ///
2156 /// Verbose logging includes constraints set via
2157 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
2158 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
2159 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
2160 /// the tree of `Node`(s).
2161 ///
2162 /// Normally sysmem prints only a single line complaint when aggregation
2163 /// fails, with just the specific detailed reason that aggregation failed,
2164 /// with little surrounding context. While this is often enough to diagnose
2165 /// a problem if only a small change was made and everything was working
2166 /// before the small change, it's often not particularly helpful for getting
2167 /// a new buffer collection to work for the first time. Especially with
2168 /// more complex trees of nodes, involving things like
2169 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
2170 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
2171 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
2172 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
2173 /// looks like and why it's failing a logical allocation, or why a tree or
2174 /// subtree is failing sooner than expected.
2175 ///
2176 /// The intent of the extra logging is to be acceptable from a performance
2177 /// point of view, under the assumption that verbose logging is only enabled
2178 /// on a low number of buffer collections. If we're not tracking down a bug,
2179 /// we shouldn't send this message.
2180 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
2181 self.client.send::<fidl::encoding::EmptyPayload>(
2182 (),
2183 0x5209c77415b4dfad,
2184 fidl::encoding::DynamicFlags::FLEXIBLE,
2185 )
2186 }
2187
2188 /// This gets a handle that can be used as a parameter to
2189 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
2190 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
2191 /// client obtained this handle from this `Node`.
2192 ///
2193 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
2194 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
2195 /// despite the two calls typically being on different channels.
2196 ///
2197 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
2198 ///
2199 /// All table fields are currently required.
2200 ///
2201 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
2202 /// different `Node` channel, to prove that the client obtained the handle
2203 /// from this `Node`.
2204 pub fn r#get_node_ref(
2205 &self,
2206 ___deadline: zx::MonotonicInstant,
2207 ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
2208 let _response = self.client.send_query::<
2209 fidl::encoding::EmptyPayload,
2210 fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
2211 BufferCollectionMarker,
2212 >(
2213 (),
2214 0x5b3d0e51614df053,
2215 fidl::encoding::DynamicFlags::FLEXIBLE,
2216 ___deadline,
2217 )?
2218 .into_result::<BufferCollectionMarker>("get_node_ref")?;
2219 Ok(_response)
2220 }
2221
2222 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
2223 /// rooted at a different child token of a common parent
2224 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
2225 /// passed-in `node_ref`.
2226 ///
2227 /// This call is for assisting with admission control de-duplication, and
2228 /// with debugging.
2229 ///
2230 /// The `node_ref` must be obtained using
2231 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
2232 ///
2233 /// The `node_ref` can be a duplicated handle; it's not necessary to call
2234 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
2235 ///
2236 /// If a calling token may not actually be a valid token at all due to a
2237 /// potentially hostile/untrusted provider of the token, call
2238 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
2239 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
2240 /// never responds due to a calling token not being a real token (not really
2241 /// talking to sysmem). Another option is to call
2242 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
2243 /// which also validates the token along with converting it to a
2244 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
2245 ///
2246 /// All table fields are currently required.
2247 ///
2248 /// - response `is_alternate`
2249 /// - true: The first parent node in common between the calling node and
2250 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
2251 /// that the calling `Node` and the `node_ref` `Node` will not have both
2252 /// their constraints apply - rather sysmem will choose one or the other
2253 /// of the constraints - never both. This is because only one child of
2254 /// a `BufferCollectionTokenGroup` is selected during logical
2255 /// allocation, with only that one child's subtree contributing to
2256 /// constraints aggregation.
2257 /// - false: The first parent node in common between the calling `Node`
2258 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
2259 /// Currently, this means the first parent node in common is a
2260 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
2261 /// `Release`ed). This means that the calling `Node` and the `node_ref`
2262 /// `Node` may have both their constraints apply during constraints
2263 /// aggregation of the logical allocation, if both `Node`(s) are
2264 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
2265 /// this case, there is no `BufferCollectionTokenGroup` that will
2266 /// directly prevent the two `Node`(s) from both being selected and
2267 /// their constraints both aggregated, but even when false, one or both
2268 /// `Node`(s) may still be eliminated from consideration if one or both
2269 /// `Node`(s) has a direct or indirect parent
2270 /// `BufferCollectionTokenGroup` which selects a child subtree other
2271 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
2272 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
2273 /// associated with the same buffer collection as the calling `Node`.
2274 /// Another reason for this error is if the `node_ref` is an
2275 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
2276 /// a real `node_ref` obtained from `GetNodeRef`.
2277 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
2279 /// the needed rights expected on a real `node_ref`.
2280 /// * No other failing status codes are returned by this call. However,
2281 /// sysmem may add additional codes in future, so the client should have
2282 /// sensible default handling for any failing status code.
2283 pub fn r#is_alternate_for(
2284 &self,
2285 mut payload: NodeIsAlternateForRequest,
2286 ___deadline: zx::MonotonicInstant,
2287 ) -> Result<NodeIsAlternateForResult, fidl::Error> {
2288 let _response = self.client.send_query::<
2289 NodeIsAlternateForRequest,
2290 fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
2291 BufferCollectionMarker,
2292 >(
2293 &mut payload,
2294 0x3a58e00157e0825,
2295 fidl::encoding::DynamicFlags::FLEXIBLE,
2296 ___deadline,
2297 )?
2298 .into_result::<BufferCollectionMarker>("is_alternate_for")?;
2299 Ok(_response.map(|x| x))
2300 }
2301
2302 /// Get the buffer collection ID. This ID is also available from
2303 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
2304 /// within the collection).
2305 ///
2306 /// This call is mainly useful in situations where we can't convey a
2307 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
2308 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
2309 /// handle, which can be joined back up with a `BufferCollection` client end
2310 /// that was created via a different path. Prefer to convey a
2311 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
2312 ///
2313 /// Trusting a `buffer_collection_id` value from a source other than sysmem
2314 /// is analogous to trusting a koid value from a source other than zircon.
2315 /// Both should be avoided unless really necessary, and both require
2316 /// caution. In some situations it may be reasonable to refer to a
2317 /// pre-established `BufferCollection` by `buffer_collection_id` via a
2318 /// protocol for efficiency reasons, but an incoming value purporting to be
2319 /// a `buffer_collection_id` is not sufficient alone to justify granting the
2320 /// sender of the `buffer_collection_id` any capability. The sender must
2321 /// first prove to a receiver that the sender has/had a VMO or has/had a
2322 /// `BufferCollectionToken` to the same collection by sending a handle that
2323 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
2324 /// `buffer_collection_id` value. The receiver should take care to avoid
2325 /// assuming that a sender had a `BufferCollectionToken` in cases where the
2326 /// sender has only proven that the sender had a VMO.
2327 ///
2328 /// - response `buffer_collection_id` This ID is unique per buffer
2329 /// collection per boot. Each buffer is uniquely identified by the
2330 /// `buffer_collection_id` and `buffer_index` together.
2331 pub fn r#get_buffer_collection_id(
2332 &self,
2333 ___deadline: zx::MonotonicInstant,
2334 ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
2335 let _response = self.client.send_query::<
2336 fidl::encoding::EmptyPayload,
2337 fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
2338 BufferCollectionMarker,
2339 >(
2340 (),
2341 0x77d19a494b78ba8c,
2342 fidl::encoding::DynamicFlags::FLEXIBLE,
2343 ___deadline,
2344 )?
2345 .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
2346 Ok(_response)
2347 }
2348
2349 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
2350 /// created after this message to weak, which means that a client's `Node`
2351 /// client end (or a child created after this message) is not alone
2352 /// sufficient to keep allocated VMOs alive.
2353 ///
2354 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
2355 /// `close_weak_asap`.
2356 ///
2357 /// This message is only permitted before the `Node` becomes ready for
2358 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
2359 /// * `BufferCollectionToken`: any time
2360 /// * `BufferCollection`: before `SetConstraints`
2361 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
2362 ///
2363 /// Currently, no conversion from strong `Node` to weak `Node` after ready
2364 /// for allocation is provided, but a client can simulate that by creating
2365 /// an additional `Node` before allocation and setting that additional
2366 /// `Node` to weak, and then potentially at some point later sending
2367 /// `Release` and closing the client end of the client's strong `Node`, but
2368 /// keeping the client's weak `Node`.
2369 ///
2370 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
2371 /// collection failure (all `Node` client end(s) will see
2372 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
2373 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
2374 /// this situation until all `Node`(s) are ready for allocation. For initial
2375 /// allocation to succeed, at least one strong `Node` is required to exist
2376 /// at allocation time, but after that client receives VMO handles, that
2377 /// client can `BufferCollection.Release` and close the client end without
2378 /// causing this type of failure.
2379 ///
2380 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
2381 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
2382 /// separately as appropriate.
2383 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
2384 self.client.send::<fidl::encoding::EmptyPayload>(
2385 (),
2386 0x22dd3ea514eeffe1,
2387 fidl::encoding::DynamicFlags::FLEXIBLE,
2388 )
2389 }
2390
2391 /// This indicates to sysmem that the client is prepared to pay attention to
2392 /// `close_weak_asap`.
2393 ///
2394 /// If sent, this message must be before
2395 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
2396 ///
2397 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
2398 /// send this message before `WaitForAllBuffersAllocated`, or a parent
2399 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
2400 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
2401 /// trigger buffer collection failure.
2402 ///
2403 /// This message is necessary because weak sysmem VMOs have not always been
2404 /// a thing, so older clients are not aware of the need to pay attention to
2405 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
2406 /// sysmem weak VMO handles asap. By having this message and requiring
2407 /// participants to indicate their acceptance of this aspect of the overall
2408 /// protocol, we avoid situations where an older client is delivered a weak
2409 /// VMO without any way for sysmem to get that VMO to close quickly later
2410 /// (and on a per-buffer basis).
2411 ///
2412 /// A participant that doesn't handle `close_weak_asap` and also doesn't
2413 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
2414 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
2415 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
2416 /// same participant has a child/delegate which does retrieve VMOs, that
2417 /// child/delegate will need to send `SetWeakOk` before
2418 /// `WaitForAllBuffersAllocated`.
2419 ///
2420 /// + request `for_child_nodes_also` If present and true, this means direct
2421 /// child nodes of this node created after this message plus all
2422 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
2423 /// those nodes. Any child node of this node that was created before this
2424 /// message is not included. This setting is "sticky" in the sense that a
2425 /// subsequent `SetWeakOk` without this bool set to true does not reset
2426 /// the server-side bool. If this creates a problem for a participant, a
2427 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
2428 /// tokens instead, as appropriate. A participant should only set
2429 /// `for_child_nodes_also` true if the participant can really promise to
2430 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
2431 /// weak VMO handles held by participants holding the corresponding child
2432 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
2433 /// which are using sysmem(1) can be weak, despite the clients of those
2434 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
2435 /// direct way to find out about `close_weak_asap`. This only applies to
2436 /// descendents of this `Node` which are using sysmem(1), not to this
2437 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
2438 /// token, which will fail allocation unless an ancestor of this `Node`
2439 /// specified `for_child_nodes_also` true.
2440 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
2441 self.client.send::<NodeSetWeakOkRequest>(
2442 &mut payload,
2443 0x38a44fc4d7724be9,
2444 fidl::encoding::DynamicFlags::FLEXIBLE,
2445 )
2446 }
2447
2448 /// The server_end will be closed after this `Node` and any child nodes have
    /// released their buffer counts, making those counts available for
2450 /// reservation by a different `Node` via
2451 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
2452 ///
2453 /// The `Node` buffer counts may not be released until the entire tree of
2454 /// `Node`(s) is closed or failed, because
2455 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
2456 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
2457 /// `Node` buffer counts remain reserved until the orphaned node is later
2458 /// cleaned up.
2459 ///
2460 /// If the `Node` exceeds a fairly large number of attached eventpair server
2461 /// ends, a log message will indicate this and the `Node` (and the
2462 /// appropriate) sub-tree will fail.
2463 ///
2464 /// The `server_end` will remain open when
2465 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
2466 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
2467 /// [`fuchsia.sysmem2/BufferCollection`].
2468 ///
2469 /// This message can also be used with a
2470 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
2471 pub fn r#attach_node_tracking(
2472 &self,
2473 mut payload: NodeAttachNodeTrackingRequest,
2474 ) -> Result<(), fidl::Error> {
2475 self.client.send::<NodeAttachNodeTrackingRequest>(
2476 &mut payload,
2477 0x3f22f2a293d3cdac,
2478 fidl::encoding::DynamicFlags::FLEXIBLE,
2479 )
2480 }
2481
2482 /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
2483 /// collection.
2484 ///
2485 /// A participant may only call
2486 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
2487 /// [`fuchsia.sysmem2/BufferCollection`].
2488 ///
2489 /// For buffer allocation to be attempted, all holders of a
2490 /// `BufferCollection` client end need to call `SetConstraints` before
2491 /// sysmem will attempt to allocate buffers.
2492 ///
2493 /// + request `constraints` These are the constraints on the buffer
2494 /// collection imposed by the sending client/participant. The
2495 /// `constraints` field is not required to be set. If not set, the client
2496 /// is not setting any actual constraints, but is indicating that the
2497 /// client has no constraints to set. A client that doesn't set the
2498 /// `constraints` field won't receive any VMO handles, but can still find
2499 /// out how many buffers were allocated and can still refer to buffers by
2500 /// their `buffer_index`.
2501 pub fn r#set_constraints(
2502 &self,
2503 mut payload: BufferCollectionSetConstraintsRequest,
2504 ) -> Result<(), fidl::Error> {
2505 self.client.send::<BufferCollectionSetConstraintsRequest>(
2506 &mut payload,
2507 0x1fde0f19d650197b,
2508 fidl::encoding::DynamicFlags::FLEXIBLE,
2509 )
2510 }
2511
2512 /// Wait until all buffers are allocated.
2513 ///
2514 /// This FIDL call completes when buffers have been allocated, or completes
2515 /// with some failure detail if allocation has been attempted but failed.
2516 ///
2517 /// The following must occur before buffers will be allocated:
2518 /// * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
2519 /// collection must be turned in via `BindSharedCollection` to get a
2520 /// [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
2521 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
2522 /// or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
2523 /// to them.
2524 /// * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
2525 /// must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
2526 /// sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
2527 /// sent to them.
2528 ///
2529 /// - result `buffer_collection_info` The VMO handles and other related
2530 /// info.
2531 /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
2532 /// cannot be fulfilled due to resource exhaustion.
2533 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION`] The request is
2534 /// malformed.
2535 /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY`] The
2536 /// request is valid but cannot be satisfied, perhaps due to hardware
2537 /// limitations. This can happen if participants have incompatible
2538 /// constraints (empty intersection, roughly speaking). See the log for
2539 /// more info. In cases where a participant could potentially be treated
2540 /// as optional, see [`BufferCollectionTokenGroup`]. When using
2541 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
2542 /// error code if there aren't enough buffers in the pre-existing
2543 /// collection to satisfy the constraints set on the attached token and
2544 /// any sub-tree of tokens derived from the attached token.
2545 pub fn r#wait_for_all_buffers_allocated(
2546 &self,
2547 ___deadline: zx::MonotonicInstant,
2548 ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
2549 let _response = self
2550 .client
2551 .send_query::<fidl::encoding::EmptyPayload, fidl::encoding::FlexibleResultType<
2552 BufferCollectionWaitForAllBuffersAllocatedResponse,
2553 Error,
2554 >, BufferCollectionMarker>(
2555 (),
2556 0x62300344b61404e,
2557 fidl::encoding::DynamicFlags::FLEXIBLE,
2558 ___deadline,
2559 )?
2560 .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
2561 Ok(_response.map(|x| x))
2562 }
2563
2564 /// Checks whether all the buffers have been allocated, in a polling
2565 /// fashion.
2566 ///
2567 /// * If the buffer collection has been allocated, returns success.
2568 /// * If the buffer collection failed allocation, returns the same
2569 /// [`fuchsia.sysmem2/Error`] as
2570 /// [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
2571 /// return.
2572 /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
2573 /// attempted allocation yet. This means that WaitForAllBuffersAllocated
2574 /// would not respond quickly.
2575 pub fn r#check_all_buffers_allocated(
2576 &self,
2577 ___deadline: zx::MonotonicInstant,
2578 ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
2579 let _response = self.client.send_query::<
2580 fidl::encoding::EmptyPayload,
2581 fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
2582 BufferCollectionMarker,
2583 >(
2584 (),
2585 0x35a5fe77ce939c10,
2586 fidl::encoding::DynamicFlags::FLEXIBLE,
2587 ___deadline,
2588 )?
2589 .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
2590 Ok(_response.map(|x| x))
2591 }
2592
2593 /// Create a new token to add a new participant to an existing logical
2594 /// buffer collection, if the existing collection's buffer counts,
2595 /// constraints, and participants allow.
2596 ///
2597 /// This can be useful in replacing a failed participant, and/or in
2598 /// adding/re-adding a participant after buffers have already been
2599 /// allocated.
2600 ///
2601 /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
2602 /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
2603 /// goes through the normal procedure of setting constraints or closing
2604 /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
2605 /// clients' point of view, despite the possibility that all the buffers
2606 /// were actually allocated previously. This process is called "logical
2607 /// allocation". Most instances of "allocation" in docs for other messages
2608 /// can also be read as "allocation or logical allocation" while remaining
2609 /// valid, but we just say "allocation" in most places for brevity/clarity
2610 /// of explanation, with the details of "logical allocation" left for the
2611 /// docs here on `AttachToken`.
2612 ///
2613 /// Failure of an attached `Node` does not propagate to the parent of the
2614 /// attached `Node`. More generally, failure of a child `Node` is blocked
2615 /// from reaching its parent `Node` if the child is attached, or if the
2616 /// child is dispensable and the failure occurred after logical allocation
2617 /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
2618 ///
2619 /// A participant may in some scenarios choose to initially use a
2620 /// dispensable token for a given instance of a delegate participant, and
2621 /// then later if the first instance of that delegate participant fails, a
    /// new second instance of that delegate participant may be given a token
2623 /// created with `AttachToken`.
2624 ///
2625 /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
2626 /// client end, the token acts like any other token. The client can
2627 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
2628 /// and can send the token to a different process/participant. The
2629 /// `BufferCollectionToken` `Node` should be converted to a
2630 /// `BufferCollection` `Node` as normal by sending
2631 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
2632 /// without causing subtree failure by sending
2633 /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
2634 /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
2635 /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
2636 /// the `BufferCollection`.
2637 ///
2638 /// Within the subtree, a success result from
2639 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
2640 /// the subtree participants' constraints were satisfiable using the
2641 /// already-existing buffer collection, the already-established
2642 /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
2643 /// constraints, and the already-existing other participants (already added
2644 /// via successful logical allocation) and their specified buffer counts in
2645 /// their constraints. A failure result means the new participants'
2646 /// constraints cannot be satisfied using the existing buffer collection and
2647 /// its already-added participants. Creating a new collection instead may
2648 /// allow all participants' constraints to be satisfied, assuming
2649 /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
2650 /// used.
2651 ///
2652 /// A token created with `AttachToken` performs constraints aggregation with
2653 /// all constraints currently in effect on the buffer collection, plus the
2654 /// attached token under consideration plus child tokens under the attached
2655 /// token which are not themselves an attached token or under such a token.
2656 /// Further subtrees under this subtree are considered for logical
2657 /// allocation only after this subtree has completed logical allocation.
2658 ///
2659 /// Assignment of existing buffers to participants'
2660 /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
2661 /// etc is first-come first-served, but a child can't logically allocate
2662 /// before all its parents have sent `SetConstraints`.
2663 ///
2664 /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
2665 /// in contrast to `AttachToken`, has the created token `Node` + child
2666 /// `Node`(s) (in the created subtree but not in any subtree under this
2667 /// subtree) participate in constraints aggregation along with its parent
2668 /// during the parent's allocation or logical allocation.
2669 ///
2670 /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
2671 /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
2672 /// sysmem before the new token can be passed to `BindSharedCollection`. The
2673 /// `Sync` of the new token can be accomplished with
2674 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
2675 /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
2676 /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
2677 /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
2678 /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
2679 /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
2680 /// created token, to also sync those additional tokens to sysmem using a
2681 /// single round-trip.
2682 ///
2683 /// All table fields are currently required.
2684 ///
    /// + request `rights_attenuation_mask` This allows attenuating the VMO
2686 /// rights of the subtree. These values for `rights_attenuation_mask`
2687 /// result in no attenuation (note that 0 is not on this list):
2688 /// + ZX_RIGHT_SAME_RIGHTS (preferred)
2689 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
2690 /// + request `token_request` The server end of the `BufferCollectionToken`
2691 /// channel. The client retains the client end.
2692 pub fn r#attach_token(
2693 &self,
2694 mut payload: BufferCollectionAttachTokenRequest,
2695 ) -> Result<(), fidl::Error> {
2696 self.client.send::<BufferCollectionAttachTokenRequest>(
2697 &mut payload,
2698 0x46ac7d0008492982,
2699 fidl::encoding::DynamicFlags::FLEXIBLE,
2700 )
2701 }
2702
2703 /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
2704 /// buffers have been allocated and only the specified number of buffers (or
2705 /// fewer) remain in the buffer collection.
2706 ///
2707 /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
2708 /// client to wait until an old buffer collection is fully or mostly
2709 /// deallocated before attempting allocation of a new buffer collection. The
2710 /// eventpair is only signalled when the buffers of this collection have
2711 /// been fully deallocated (not just un-referenced by clients, but all the
2712 /// memory consumed by those buffers has been fully reclaimed/recycled), or
2713 /// when allocation or logical allocation fails for the tree or subtree
2714 /// including this [`fuchsia.sysmem2/BufferCollection`].
2715 ///
2716 /// The eventpair won't be signalled until allocation or logical allocation
2717 /// has completed; until then, the collection's current buffer count is
2718 /// ignored.
2719 ///
2720 /// If logical allocation fails for an attached subtree (using
2721 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
2722 /// eventpair will close during that failure regardless of the number of
    /// buffers potentially allocated in the overall buffer collection. This is
2724 /// for logical allocation consistency with normal allocation.
2725 ///
2726 /// The lifetime signalled by this event includes asynchronous cleanup of
2727 /// allocated buffers, and this asynchronous cleanup cannot occur until all
2728 /// holders of VMO handles to the buffers have closed those VMO handles.
2729 /// Therefore, clients should take care not to become blocked forever
2730 /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
2731 /// participants using the logical buffer collection (including the waiter
2732 /// itself) are less trusted, less reliable, or potentially blocked by the
2733 /// wait itself. Waiting asynchronously is recommended. Setting a deadline
2734 /// for the client wait may be prudent, depending on details of how the
2735 /// collection and/or its VMOs are used or shared. Failure to allocate a
2736 /// new/replacement buffer collection is better than getting stuck forever.
2737 ///
2738 /// The sysmem server itself intentionally does not perform any waiting on
2739 /// already-failed collections' VMOs to finish cleaning up before attempting
2740 /// a new allocation, and the sysmem server intentionally doesn't retry
2741 /// allocation if a new allocation fails due to out of memory, even if that
2742 /// failure is potentially due to continued existence of an old collection's
2743 /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
2744 /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
2745 /// as long as the waiting client is careful to not create a deadlock.
2746 ///
2747 /// Continued existence of old collections that are still cleaning up is not
2748 /// the only reason that a new allocation may fail due to insufficient
2749 /// memory, even if the new allocation is allocating physically contiguous
2750 /// buffers. Overall system memory pressure can also be the cause of failure
2751 /// to allocate a new collection. See also
2752 /// [`fuchsia.memorypressure/Provider`].
2753 ///
2754 /// `AttachLifetimeTracking` is meant to be compatible with other protocols
2755 /// with a similar `AttachLifetimeTracking` message; duplicates of the same
2756 /// `eventpair` handle (server end) can be sent via more than one
2757 /// `AttachLifetimeTracking` message to different protocols, and the
2758 /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
2759 /// the conditions are met (all holders of duplicates have closed their
    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
2761 /// client end can (also) be duplicated without preventing the
2762 /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
2763 ///
2764 /// The server intentionally doesn't "trust" any signals set on the
2765 /// `server_end`. This mechanism intentionally uses only
2766 /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
2767 /// "early", and is only set when all handles to the server end eventpair
2768 /// are closed. No meaning is associated with any of the other signals, and
2769 /// clients should ignore any other signal bits on either end of the
2770 /// `eventpair`.
2771 ///
2772 /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
2773 /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
2774 /// transfer without causing `BufferCollection` channel failure).
2775 ///
2776 /// All table fields are currently required.
2777 ///
2778 /// + request `server_end` This eventpair handle will be closed by the
2779 /// sysmem server when buffers have been allocated initially and the
2780 /// number of buffers is then less than or equal to `buffers_remaining`.
2781 /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
2782 /// fewer) buffers to be fully deallocated. A number greater than zero can
2783 /// be useful in situations where a known number of buffers are
2784 /// intentionally not closed so that the data can continue to be used,
2785 /// such as for keeping the last available video frame displayed in the UI
2786 /// even if the video stream was using protected output buffers. It's
2787 /// outside the scope of the `BufferCollection` interface (at least for
2788 /// now) to determine how many buffers may be held without closing, but
2789 /// it'll typically be in the range 0-2.
    pub fn r#attach_lifetime_tracking(
        &self,
        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call: no reply is awaited. `payload` is a resource table (it
        // carries the eventpair `server_end` handle), so it is encoded via
        // `&mut` to move the handle out. The hex literal is this method's FIDL
        // ordinal, and FLEXIBLE marks the method's declared strictness.
        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
            &mut payload,
            0x3ecb510113116dcf,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2800}
2801
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionSynchronousProxy> for zx::NullableHandle {
    /// Converts the proxy into a generic handle by extracting its underlying channel.
    fn from(value: BufferCollectionSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
2808
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionSynchronousProxy::new(value)
    }
}
2815
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionSynchronousProxy {
    type Protocol = BufferCollectionMarker;

    /// Builds a synchronous proxy from a typed client endpoint.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
2824
/// Asynchronous client-side proxy for the `fuchsia.sysmem2/BufferCollection`
/// protocol. Cloning is cheap and yields a handle to the same channel.
#[derive(Debug, Clone)]
pub struct BufferCollectionProxy {
    // Underlying async FIDL client; every proxy method delegates to it.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
2829
2830impl fidl::endpoints::Proxy for BufferCollectionProxy {
2831 type Protocol = BufferCollectionMarker;
2832
2833 fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
2834 Self::new(inner)
2835 }
2836
2837 fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
2838 self.client.into_channel().map_err(|client| Self { client })
2839 }
2840
2841 fn as_channel(&self) -> &::fidl::AsyncChannel {
2842 self.client.as_channel()
2843 }
2844}
2845
2846impl BufferCollectionProxy {
2847 /// Create a new Proxy for fuchsia.sysmem2/BufferCollection.
2848 pub fn new(channel: ::fidl::AsyncChannel) -> Self {
2849 let protocol_name = <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
2850 Self { client: fidl::client::Client::new(channel, protocol_name) }
2851 }
2852
2853 /// Get a Stream of events from the remote end of the protocol.
2854 ///
2855 /// # Panics
2856 ///
2857 /// Panics if the event stream was already taken.
2858 pub fn take_event_stream(&self) -> BufferCollectionEventStream {
2859 BufferCollectionEventStream { event_receiver: self.client.take_event_receiver() }
2860 }
2861
2862 /// Ensure that previous messages have been received server side. This is
2863 /// particularly useful after previous messages that created new tokens,
2864 /// because a token must be known to the sysmem server before sending the
2865 /// token to another participant.
2866 ///
2867 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
2868 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
2869 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
2870 /// to mitigate the possibility of a hostile/fake
2871 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
2872 /// Another way is to pass the token to
2873 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
2874 /// the token as part of exchanging it for a
2875 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
2876 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
2877 /// of stalling.
2878 ///
2879 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
2880 /// and then starting and completing a `Sync`, it's then safe to send the
2881 /// `BufferCollectionToken` client ends to other participants knowing the
2882 /// server will recognize the tokens when they're sent by the other
2883 /// participants to sysmem in a
2884 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
2885 /// efficient way to create tokens while avoiding unnecessary round trips.
2886 ///
2887 /// Other options include waiting for each
2888 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
2889 /// individually (using separate call to `Sync` after each), or calling
2890 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
2891 /// converted to a `BufferCollection` via
2892 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
2893 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
2894 /// the sync step and can create multiple tokens at once.
2895 pub fn r#sync(
2896 &self,
2897 ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
2898 BufferCollectionProxyInterface::r#sync(self)
2899 }
2900
2901 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
2902 ///
2903 /// Normally a participant will convert a `BufferCollectionToken` into a
2904 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
2905 /// `Release` via the token (and then close the channel immediately or
2906 /// shortly later in response to server closing the server end), which
2907 /// avoids causing buffer collection failure. Without a prior `Release`,
2908 /// closing the `BufferCollectionToken` client end will cause buffer
2909 /// collection failure.
2910 ///
2911 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
2912 ///
2913 /// By default the server handles unexpected closure of a
2914 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
2915 /// first) by failing the buffer collection. Partly this is to expedite
2916 /// closing VMO handles to reclaim memory when any participant fails. If a
2917 /// participant would like to cleanly close a `BufferCollection` without
2918 /// causing buffer collection failure, the participant can send `Release`
2919 /// before closing the `BufferCollection` client end. The `Release` can
2920 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
2921 /// buffer collection won't require constraints from this node in order to
2922 /// allocate. If after `SetConstraints`, the constraints are retained and
2923 /// aggregated, despite the lack of `BufferCollection` connection at the
2924 /// time of constraints aggregation.
2925 ///
2926 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
2927 ///
2928 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
2929 /// end (without `Release` first) will trigger failure of the buffer
2930 /// collection. To close a `BufferCollectionTokenGroup` channel without
2931 /// failing the buffer collection, ensure that AllChildrenPresent() has been
2932 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
2933 /// client end.
2934 ///
2935 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
2937 /// buffer collection will fail (triggered by reception of `Release` without
2938 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
2939 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
2940 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
2941 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
2942 /// close requires `AllChildrenPresent` (if not already sent), then
2943 /// `Release`, then close client end.
2944 ///
2945 /// If `Release` occurs after `AllChildrenPresent`, the children and all
2946 /// their constraints remain intact (just as they would if the
2947 /// `BufferCollectionTokenGroup` channel had remained open), and the client
2948 /// end close doesn't trigger buffer collection failure.
2949 ///
2950 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
2951 ///
2952 /// For brevity, the per-channel-protocol paragraphs above ignore the
2953 /// separate failure domain created by
2954 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
2955 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
2956 /// unexpectedly closes (without `Release` first) and that client end is
2957 /// under a failure domain, instead of failing the whole buffer collection,
2958 /// the failure domain is failed, but the buffer collection itself is
2959 /// isolated from failure of the failure domain. Such failure domains can be
2960 /// nested, in which case only the inner-most failure domain in which the
2961 /// `Node` resides fails.
2962 pub fn r#release(&self) -> Result<(), fidl::Error> {
2963 BufferCollectionProxyInterface::r#release(self)
2964 }
2965
2966 /// Set a name for VMOs in this buffer collection.
2967 ///
2968 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
2969 /// will be truncated to fit. The name of the vmo will be suffixed with the
2970 /// buffer index within the collection (if the suffix fits within
2971 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
2972 /// listed in the inspect data.
2973 ///
2974 /// The name only affects VMOs allocated after the name is set; this call
2975 /// does not rename existing VMOs. If multiple clients set different names
2976 /// then the larger priority value will win. Setting a new name with the
2977 /// same priority as a prior name doesn't change the name.
2978 ///
2979 /// All table fields are currently required.
2980 ///
2981 /// + request `priority` The name is only set if this is the first `SetName`
2982 /// or if `priority` is greater than any previous `priority` value in
2983 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
2984 /// + request `name` The name for VMOs created under this buffer collection.
2985 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
2986 BufferCollectionProxyInterface::r#set_name(self, payload)
2987 }
2988
2989 /// Set information about the current client that can be used by sysmem to
2990 /// help diagnose leaking memory and allocation stalls waiting for a
2991 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
2992 ///
2993 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
2995 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
2996 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
2997 ///
2998 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
2999 /// `Allocator` is the most efficient way to ensure that all
3000 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
3001 /// set, and is also more efficient than separately sending the same debug
3002 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
3003 /// created [`fuchsia.sysmem2/Node`].
3004 ///
3005 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
3006 /// indicate which client is closing their channel first, leading to subtree
3007 /// failure (which can be normal if the purpose of the subtree is over, but
3008 /// if happening earlier than expected, the client-channel-specific name can
3009 /// help diagnose where the failure is first coming from, from sysmem's
3010 /// point of view).
3011 ///
3012 /// All table fields are currently required.
3013 ///
3014 /// + request `name` This can be an arbitrary string, but the current
3015 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
3016 /// + request `id` This can be an arbitrary id, but the current process ID
3017 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
3018 pub fn r#set_debug_client_info(
3019 &self,
3020 mut payload: &NodeSetDebugClientInfoRequest,
3021 ) -> Result<(), fidl::Error> {
3022 BufferCollectionProxyInterface::r#set_debug_client_info(self, payload)
3023 }
3024
3025 /// Sysmem logs a warning if sysmem hasn't seen
3026 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
3027 /// within 5 seconds after creation of a new collection.
3028 ///
3029 /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
3031 /// take effect.
3032 ///
3033 /// In most cases the default works well.
3034 ///
3035 /// All table fields are currently required.
3036 ///
3037 /// + request `deadline` The time at which sysmem will start trying to log
3038 /// the warning, unless all constraints are with sysmem by then.
3039 pub fn r#set_debug_timeout_log_deadline(
3040 &self,
3041 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
3042 ) -> Result<(), fidl::Error> {
3043 BufferCollectionProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
3044 }
3045
3046 /// This enables verbose logging for the buffer collection.
3047 ///
3048 /// Verbose logging includes constraints set via
3049 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
3050 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
3051 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
3052 /// the tree of `Node`(s).
3053 ///
3054 /// Normally sysmem prints only a single line complaint when aggregation
3055 /// fails, with just the specific detailed reason that aggregation failed,
3056 /// with little surrounding context. While this is often enough to diagnose
3057 /// a problem if only a small change was made and everything was working
3058 /// before the small change, it's often not particularly helpful for getting
3059 /// a new buffer collection to work for the first time. Especially with
3060 /// more complex trees of nodes, involving things like
3061 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
3062 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
3063 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
3064 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
3065 /// looks like and why it's failing a logical allocation, or why a tree or
3066 /// subtree is failing sooner than expected.
3067 ///
3068 /// The intent of the extra logging is to be acceptable from a performance
3069 /// point of view, under the assumption that verbose logging is only enabled
3070 /// on a low number of buffer collections. If we're not tracking down a bug,
3071 /// we shouldn't send this message.
3072 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
3073 BufferCollectionProxyInterface::r#set_verbose_logging(self)
3074 }
3075
3076 /// This gets a handle that can be used as a parameter to
3077 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
3078 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
3079 /// client obtained this handle from this `Node`.
3080 ///
3081 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
3082 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
3083 /// despite the two calls typically being on different channels.
3084 ///
3085 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
3086 ///
3087 /// All table fields are currently required.
3088 ///
3089 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
3090 /// different `Node` channel, to prove that the client obtained the handle
3091 /// from this `Node`.
3092 pub fn r#get_node_ref(
3093 &self,
3094 ) -> fidl::client::QueryResponseFut<
3095 NodeGetNodeRefResponse,
3096 fidl::encoding::DefaultFuchsiaResourceDialect,
3097 > {
3098 BufferCollectionProxyInterface::r#get_node_ref(self)
3099 }
3100
3101 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
3102 /// rooted at a different child token of a common parent
3103 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
3104 /// passed-in `node_ref`.
3105 ///
3106 /// This call is for assisting with admission control de-duplication, and
3107 /// with debugging.
3108 ///
3109 /// The `node_ref` must be obtained using
3110 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
3111 ///
3112 /// The `node_ref` can be a duplicated handle; it's not necessary to call
3113 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
3114 ///
3115 /// If a calling token may not actually be a valid token at all due to a
3116 /// potentially hostile/untrusted provider of the token, call
3117 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
3118 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
3119 /// never responds due to a calling token not being a real token (not really
3120 /// talking to sysmem). Another option is to call
3121 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
3122 /// which also validates the token along with converting it to a
3123 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
3124 ///
3125 /// All table fields are currently required.
3126 ///
3127 /// - response `is_alternate`
3128 /// - true: The first parent node in common between the calling node and
3129 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
3130 /// that the calling `Node` and the `node_ref` `Node` will not have both
3131 /// their constraints apply - rather sysmem will choose one or the other
3132 /// of the constraints - never both. This is because only one child of
3133 /// a `BufferCollectionTokenGroup` is selected during logical
3134 /// allocation, with only that one child's subtree contributing to
3135 /// constraints aggregation.
3136 /// - false: The first parent node in common between the calling `Node`
3137 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
3138 /// Currently, this means the first parent node in common is a
3139 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
3140 /// `Release`ed). This means that the calling `Node` and the `node_ref`
3141 /// `Node` may have both their constraints apply during constraints
3142 /// aggregation of the logical allocation, if both `Node`(s) are
3143 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
3144 /// this case, there is no `BufferCollectionTokenGroup` that will
3145 /// directly prevent the two `Node`(s) from both being selected and
3146 /// their constraints both aggregated, but even when false, one or both
3147 /// `Node`(s) may still be eliminated from consideration if one or both
3148 /// `Node`(s) has a direct or indirect parent
3149 /// `BufferCollectionTokenGroup` which selects a child subtree other
3150 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
3151 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
3152 /// associated with the same buffer collection as the calling `Node`.
3153 /// Another reason for this error is if the `node_ref` is an
3154 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
3155 /// a real `node_ref` obtained from `GetNodeRef`.
3156 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
3158 /// the needed rights expected on a real `node_ref`.
3159 /// * No other failing status codes are returned by this call. However,
3160 /// sysmem may add additional codes in future, so the client should have
3161 /// sensible default handling for any failing status code.
3162 pub fn r#is_alternate_for(
3163 &self,
3164 mut payload: NodeIsAlternateForRequest,
3165 ) -> fidl::client::QueryResponseFut<
3166 NodeIsAlternateForResult,
3167 fidl::encoding::DefaultFuchsiaResourceDialect,
3168 > {
3169 BufferCollectionProxyInterface::r#is_alternate_for(self, payload)
3170 }
3171
3172 /// Get the buffer collection ID. This ID is also available from
3173 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
3174 /// within the collection).
3175 ///
3176 /// This call is mainly useful in situations where we can't convey a
3177 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
3178 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
3179 /// handle, which can be joined back up with a `BufferCollection` client end
3180 /// that was created via a different path. Prefer to convey a
3181 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
3182 ///
3183 /// Trusting a `buffer_collection_id` value from a source other than sysmem
3184 /// is analogous to trusting a koid value from a source other than zircon.
3185 /// Both should be avoided unless really necessary, and both require
3186 /// caution. In some situations it may be reasonable to refer to a
3187 /// pre-established `BufferCollection` by `buffer_collection_id` via a
3188 /// protocol for efficiency reasons, but an incoming value purporting to be
3189 /// a `buffer_collection_id` is not sufficient alone to justify granting the
3190 /// sender of the `buffer_collection_id` any capability. The sender must
3191 /// first prove to a receiver that the sender has/had a VMO or has/had a
3192 /// `BufferCollectionToken` to the same collection by sending a handle that
3193 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
3194 /// `buffer_collection_id` value. The receiver should take care to avoid
3195 /// assuming that a sender had a `BufferCollectionToken` in cases where the
3196 /// sender has only proven that the sender had a VMO.
3197 ///
3198 /// - response `buffer_collection_id` This ID is unique per buffer
3199 /// collection per boot. Each buffer is uniquely identified by the
3200 /// `buffer_collection_id` and `buffer_index` together.
3201 pub fn r#get_buffer_collection_id(
3202 &self,
3203 ) -> fidl::client::QueryResponseFut<
3204 NodeGetBufferCollectionIdResponse,
3205 fidl::encoding::DefaultFuchsiaResourceDialect,
3206 > {
3207 BufferCollectionProxyInterface::r#get_buffer_collection_id(self)
3208 }
3209
3210 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
3211 /// created after this message to weak, which means that a client's `Node`
3212 /// client end (or a child created after this message) is not alone
3213 /// sufficient to keep allocated VMOs alive.
3214 ///
3215 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
3216 /// `close_weak_asap`.
3217 ///
3218 /// This message is only permitted before the `Node` becomes ready for
3219 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
3220 /// * `BufferCollectionToken`: any time
3221 /// * `BufferCollection`: before `SetConstraints`
3222 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
3223 ///
3224 /// Currently, no conversion from strong `Node` to weak `Node` after ready
3225 /// for allocation is provided, but a client can simulate that by creating
3226 /// an additional `Node` before allocation and setting that additional
3227 /// `Node` to weak, and then potentially at some point later sending
3228 /// `Release` and closing the client end of the client's strong `Node`, but
3229 /// keeping the client's weak `Node`.
3230 ///
3231 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
3232 /// collection failure (all `Node` client end(s) will see
3233 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
3234 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
3235 /// this situation until all `Node`(s) are ready for allocation. For initial
3236 /// allocation to succeed, at least one strong `Node` is required to exist
3237 /// at allocation time, but after that client receives VMO handles, that
3238 /// client can `BufferCollection.Release` and close the client end without
3239 /// causing this type of failure.
3240 ///
3241 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
3242 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
3243 /// separately as appropriate.
3244 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
3245 BufferCollectionProxyInterface::r#set_weak(self)
3246 }
3247
3248 /// This indicates to sysmem that the client is prepared to pay attention to
3249 /// `close_weak_asap`.
3250 ///
3251 /// If sent, this message must be before
3252 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
3253 ///
3254 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
3255 /// send this message before `WaitForAllBuffersAllocated`, or a parent
3256 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
3257 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
3258 /// trigger buffer collection failure.
3259 ///
3260 /// This message is necessary because weak sysmem VMOs have not always been
3261 /// a thing, so older clients are not aware of the need to pay attention to
3262 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
3263 /// sysmem weak VMO handles asap. By having this message and requiring
3264 /// participants to indicate their acceptance of this aspect of the overall
3265 /// protocol, we avoid situations where an older client is delivered a weak
3266 /// VMO without any way for sysmem to get that VMO to close quickly later
3267 /// (and on a per-buffer basis).
3268 ///
3269 /// A participant that doesn't handle `close_weak_asap` and also doesn't
3270 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
3271 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
3272 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
3273 /// same participant has a child/delegate which does retrieve VMOs, that
3274 /// child/delegate will need to send `SetWeakOk` before
3275 /// `WaitForAllBuffersAllocated`.
3276 ///
3277 /// + request `for_child_nodes_also` If present and true, this means direct
3278 /// child nodes of this node created after this message plus all
3279 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
3280 /// those nodes. Any child node of this node that was created before this
3281 /// message is not included. This setting is "sticky" in the sense that a
3282 /// subsequent `SetWeakOk` without this bool set to true does not reset
3283 /// the server-side bool. If this creates a problem for a participant, a
3284 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
3285 /// tokens instead, as appropriate. A participant should only set
3286 /// `for_child_nodes_also` true if the participant can really promise to
3287 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
3288 /// weak VMO handles held by participants holding the corresponding child
3289 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
3290 /// which are using sysmem(1) can be weak, despite the clients of those
3291 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
3292 /// direct way to find out about `close_weak_asap`. This only applies to
3293 /// descendents of this `Node` which are using sysmem(1), not to this
3294 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
3295 /// token, which will fail allocation unless an ancestor of this `Node`
3296 /// specified `for_child_nodes_also` true.
3297 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
3298 BufferCollectionProxyInterface::r#set_weak_ok(self, payload)
3299 }
3300
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
3303 /// reservation by a different `Node` via
3304 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
3305 ///
3306 /// The `Node` buffer counts may not be released until the entire tree of
3307 /// `Node`(s) is closed or failed, because
3308 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
3309 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
3310 /// `Node` buffer counts remain reserved until the orphaned node is later
3311 /// cleaned up.
3312 ///
3313 /// If the `Node` exceeds a fairly large number of attached eventpair server
3314 /// ends, a log message will indicate this and the `Node` (and the
3315 /// appropriate) sub-tree will fail.
3316 ///
3317 /// The `server_end` will remain open when
3318 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
3319 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
3320 /// [`fuchsia.sysmem2/BufferCollection`].
3321 ///
3322 /// This message can also be used with a
3323 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
3324 pub fn r#attach_node_tracking(
3325 &self,
3326 mut payload: NodeAttachNodeTrackingRequest,
3327 ) -> Result<(), fidl::Error> {
3328 BufferCollectionProxyInterface::r#attach_node_tracking(self, payload)
3329 }
3330
3331 /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
3332 /// collection.
3333 ///
3334 /// A participant may only call
3335 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
3336 /// [`fuchsia.sysmem2/BufferCollection`].
3337 ///
3338 /// For buffer allocation to be attempted, all holders of a
3339 /// `BufferCollection` client end need to call `SetConstraints` before
3340 /// sysmem will attempt to allocate buffers.
3341 ///
3342 /// + request `constraints` These are the constraints on the buffer
3343 /// collection imposed by the sending client/participant. The
3344 /// `constraints` field is not required to be set. If not set, the client
3345 /// is not setting any actual constraints, but is indicating that the
3346 /// client has no constraints to set. A client that doesn't set the
3347 /// `constraints` field won't receive any VMO handles, but can still find
3348 /// out how many buffers were allocated and can still refer to buffers by
3349 /// their `buffer_index`.
3350 pub fn r#set_constraints(
3351 &self,
3352 mut payload: BufferCollectionSetConstraintsRequest,
3353 ) -> Result<(), fidl::Error> {
3354 BufferCollectionProxyInterface::r#set_constraints(self, payload)
3355 }
3356
3357 /// Wait until all buffers are allocated.
3358 ///
3359 /// This FIDL call completes when buffers have been allocated, or completes
3360 /// with some failure detail if allocation has been attempted but failed.
3361 ///
3362 /// The following must occur before buffers will be allocated:
3363 /// * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
3364 /// collection must be turned in via `BindSharedCollection` to get a
3365 /// [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
3366 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
3367 /// or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
3368 /// to them.
3369 /// * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
3370 /// must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
3371 /// sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
3372 /// sent to them.
3373 ///
3374 /// - result `buffer_collection_info` The VMO handles and other related
3375 /// info.
3376 /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
3377 /// cannot be fulfilled due to resource exhaustion.
3378 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION`] The request is
3379 /// malformed.
3380 /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY`] The
3381 /// request is valid but cannot be satisfied, perhaps due to hardware
3382 /// limitations. This can happen if participants have incompatible
3383 /// constraints (empty intersection, roughly speaking). See the log for
3384 /// more info. In cases where a participant could potentially be treated
3385 /// as optional, see [`BufferCollectionTokenGroup`]. When using
3386 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
3387 /// error code if there aren't enough buffers in the pre-existing
3388 /// collection to satisfy the constraints set on the attached token and
3389 /// any sub-tree of tokens derived from the attached token.
3390 pub fn r#wait_for_all_buffers_allocated(
3391 &self,
3392 ) -> fidl::client::QueryResponseFut<
3393 BufferCollectionWaitForAllBuffersAllocatedResult,
3394 fidl::encoding::DefaultFuchsiaResourceDialect,
3395 > {
3396 BufferCollectionProxyInterface::r#wait_for_all_buffers_allocated(self)
3397 }
3398
3399 /// Checks whether all the buffers have been allocated, in a polling
3400 /// fashion.
3401 ///
3402 /// * If the buffer collection has been allocated, returns success.
3403 /// * If the buffer collection failed allocation, returns the same
3404 /// [`fuchsia.sysmem2/Error`] as
3405 /// [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
3406 /// return.
3407 /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
3408 /// attempted allocation yet. This means that WaitForAllBuffersAllocated
3409 /// would not respond quickly.
3410 pub fn r#check_all_buffers_allocated(
3411 &self,
3412 ) -> fidl::client::QueryResponseFut<
3413 BufferCollectionCheckAllBuffersAllocatedResult,
3414 fidl::encoding::DefaultFuchsiaResourceDialect,
3415 > {
3416 BufferCollectionProxyInterface::r#check_all_buffers_allocated(self)
3417 }
3418
3419 /// Create a new token to add a new participant to an existing logical
3420 /// buffer collection, if the existing collection's buffer counts,
3421 /// constraints, and participants allow.
3422 ///
3423 /// This can be useful in replacing a failed participant, and/or in
3424 /// adding/re-adding a participant after buffers have already been
3425 /// allocated.
3426 ///
3427 /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
3428 /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
3429 /// goes through the normal procedure of setting constraints or closing
3430 /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
3431 /// clients' point of view, despite the possibility that all the buffers
3432 /// were actually allocated previously. This process is called "logical
3433 /// allocation". Most instances of "allocation" in docs for other messages
3434 /// can also be read as "allocation or logical allocation" while remaining
3435 /// valid, but we just say "allocation" in most places for brevity/clarity
3436 /// of explanation, with the details of "logical allocation" left for the
3437 /// docs here on `AttachToken`.
3438 ///
3439 /// Failure of an attached `Node` does not propagate to the parent of the
3440 /// attached `Node`. More generally, failure of a child `Node` is blocked
3441 /// from reaching its parent `Node` if the child is attached, or if the
3442 /// child is dispensable and the failure occurred after logical allocation
3443 /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
3444 ///
3445 /// A participant may in some scenarios choose to initially use a
3446 /// dispensable token for a given instance of a delegate participant, and
3447 /// then later if the first instance of that delegate participant fails, a
3448 /// new second instance of that delegate participant my be given a token
3449 /// created with `AttachToken`.
3450 ///
3451 /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
3452 /// client end, the token acts like any other token. The client can
3453 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
3454 /// and can send the token to a different process/participant. The
3455 /// `BufferCollectionToken` `Node` should be converted to a
3456 /// `BufferCollection` `Node` as normal by sending
3457 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
3458 /// without causing subtree failure by sending
3459 /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
3460 /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
3461 /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
3462 /// the `BufferCollection`.
3463 ///
3464 /// Within the subtree, a success result from
3465 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
3466 /// the subtree participants' constraints were satisfiable using the
3467 /// already-existing buffer collection, the already-established
3468 /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
3469 /// constraints, and the already-existing other participants (already added
3470 /// via successful logical allocation) and their specified buffer counts in
3471 /// their constraints. A failure result means the new participants'
3472 /// constraints cannot be satisfied using the existing buffer collection and
3473 /// its already-added participants. Creating a new collection instead may
3474 /// allow all participants' constraints to be satisfied, assuming
3475 /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
3476 /// used.
3477 ///
3478 /// A token created with `AttachToken` performs constraints aggregation with
3479 /// all constraints currently in effect on the buffer collection, plus the
3480 /// attached token under consideration plus child tokens under the attached
3481 /// token which are not themselves an attached token or under such a token.
3482 /// Further subtrees under this subtree are considered for logical
3483 /// allocation only after this subtree has completed logical allocation.
3484 ///
3485 /// Assignment of existing buffers to participants'
3486 /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
3487 /// etc is first-come first-served, but a child can't logically allocate
3488 /// before all its parents have sent `SetConstraints`.
3489 ///
3490 /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
3491 /// in contrast to `AttachToken`, has the created token `Node` + child
3492 /// `Node`(s) (in the created subtree but not in any subtree under this
3493 /// subtree) participate in constraints aggregation along with its parent
3494 /// during the parent's allocation or logical allocation.
3495 ///
3496 /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
3497 /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
3498 /// sysmem before the new token can be passed to `BindSharedCollection`. The
3499 /// `Sync` of the new token can be accomplished with
3500 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
3501 /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
3502 /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
3503 /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
3504 /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
3505 /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
3506 /// created token, to also sync those additional tokens to sysmem using a
3507 /// single round-trip.
3508 ///
3509 /// All table fields are currently required.
3510 ///
3511 /// + request `rights_attentuation_mask` This allows attenuating the VMO
3512 /// rights of the subtree. These values for `rights_attenuation_mask`
3513 /// result in no attenuation (note that 0 is not on this list):
3514 /// + ZX_RIGHT_SAME_RIGHTS (preferred)
3515 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
3516 /// + request `token_request` The server end of the `BufferCollectionToken`
3517 /// channel. The client retains the client end.
3518 pub fn r#attach_token(
3519 &self,
3520 mut payload: BufferCollectionAttachTokenRequest,
3521 ) -> Result<(), fidl::Error> {
3522 BufferCollectionProxyInterface::r#attach_token(self, payload)
3523 }
3524
3525 /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
3526 /// buffers have been allocated and only the specified number of buffers (or
3527 /// fewer) remain in the buffer collection.
3528 ///
3529 /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
3530 /// client to wait until an old buffer collection is fully or mostly
3531 /// deallocated before attempting allocation of a new buffer collection. The
3532 /// eventpair is only signalled when the buffers of this collection have
3533 /// been fully deallocated (not just un-referenced by clients, but all the
3534 /// memory consumed by those buffers has been fully reclaimed/recycled), or
3535 /// when allocation or logical allocation fails for the tree or subtree
3536 /// including this [`fuchsia.sysmem2/BufferCollection`].
3537 ///
3538 /// The eventpair won't be signalled until allocation or logical allocation
3539 /// has completed; until then, the collection's current buffer count is
3540 /// ignored.
3541 ///
3542 /// If logical allocation fails for an attached subtree (using
3543 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
3544 /// eventpair will close during that failure regardless of the number of
3545 /// buffers potenitally allocated in the overall buffer collection. This is
3546 /// for logical allocation consistency with normal allocation.
3547 ///
3548 /// The lifetime signalled by this event includes asynchronous cleanup of
3549 /// allocated buffers, and this asynchronous cleanup cannot occur until all
3550 /// holders of VMO handles to the buffers have closed those VMO handles.
3551 /// Therefore, clients should take care not to become blocked forever
3552 /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
3553 /// participants using the logical buffer collection (including the waiter
3554 /// itself) are less trusted, less reliable, or potentially blocked by the
3555 /// wait itself. Waiting asynchronously is recommended. Setting a deadline
3556 /// for the client wait may be prudent, depending on details of how the
3557 /// collection and/or its VMOs are used or shared. Failure to allocate a
3558 /// new/replacement buffer collection is better than getting stuck forever.
3559 ///
3560 /// The sysmem server itself intentionally does not perform any waiting on
3561 /// already-failed collections' VMOs to finish cleaning up before attempting
3562 /// a new allocation, and the sysmem server intentionally doesn't retry
3563 /// allocation if a new allocation fails due to out of memory, even if that
3564 /// failure is potentially due to continued existence of an old collection's
3565 /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
3566 /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
3567 /// as long as the waiting client is careful to not create a deadlock.
3568 ///
3569 /// Continued existence of old collections that are still cleaning up is not
3570 /// the only reason that a new allocation may fail due to insufficient
3571 /// memory, even if the new allocation is allocating physically contiguous
3572 /// buffers. Overall system memory pressure can also be the cause of failure
3573 /// to allocate a new collection. See also
3574 /// [`fuchsia.memorypressure/Provider`].
3575 ///
3576 /// `AttachLifetimeTracking` is meant to be compatible with other protocols
3577 /// with a similar `AttachLifetimeTracking` message; duplicates of the same
3578 /// `eventpair` handle (server end) can be sent via more than one
3579 /// `AttachLifetimeTracking` message to different protocols, and the
3580 /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
3581 /// the conditions are met (all holders of duplicates have closed their
3582 /// server end handle(s)). Also, thanks to how eventpair endponts work, the
3583 /// client end can (also) be duplicated without preventing the
3584 /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
3585 ///
3586 /// The server intentionally doesn't "trust" any signals set on the
3587 /// `server_end`. This mechanism intentionally uses only
3588 /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
3589 /// "early", and is only set when all handles to the server end eventpair
3590 /// are closed. No meaning is associated with any of the other signals, and
3591 /// clients should ignore any other signal bits on either end of the
3592 /// `eventpair`.
3593 ///
3594 /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
3595 /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
3596 /// transfer without causing `BufferCollection` channel failure).
3597 ///
3598 /// All table fields are currently required.
3599 ///
3600 /// + request `server_end` This eventpair handle will be closed by the
3601 /// sysmem server when buffers have been allocated initially and the
3602 /// number of buffers is then less than or equal to `buffers_remaining`.
3603 /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
3604 /// fewer) buffers to be fully deallocated. A number greater than zero can
3605 /// be useful in situations where a known number of buffers are
3606 /// intentionally not closed so that the data can continue to be used,
3607 /// such as for keeping the last available video frame displayed in the UI
3608 /// even if the video stream was using protected output buffers. It's
3609 /// outside the scope of the `BufferCollection` interface (at least for
3610 /// now) to determine how many buffers may be held without closing, but
3611 /// it'll typically be in the range 0-2.
3612 pub fn r#attach_lifetime_tracking(
3613 &self,
3614 mut payload: BufferCollectionAttachLifetimeTrackingRequest,
3615 ) -> Result<(), fidl::Error> {
3616 BufferCollectionProxyInterface::r#attach_lifetime_tracking(self, payload)
3617 }
3618}
3619
// Generated client-side dispatch for the `BufferCollection` protocol.
// Each method encodes its payload and sends it over the proxy's channel with
// the method's FIDL ordinal (the hex constant; for two-way calls the same
// ordinal is repeated in the matching `_decode` helper) and `FLEXIBLE`
// dynamic flags. One-way methods use `send`; two-way methods use
// `send_query_and_decode` and return a future that resolves to the decoded
// response.
impl BufferCollectionProxyInterface for BufferCollectionProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Unwraps the flexible envelope around the empty `Sync` response.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54, // ordinal for `Sync`
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way: no response is decoded.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6, // ordinal for `Release`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9, // ordinal for `SetName`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1, // ordinal for `SetDebugClientInfo`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806, // ordinal for `SetDebugTimeoutLogDeadline`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad, // ordinal for `SetVerboseLogging`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        // Unwraps the flexible envelope around the `GetNodeRef` response.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053, // ordinal for `GetNodeRef`
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        // Decodes a flexible result envelope: success payload or domain `Error`.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825, // ordinal for `IsAlternateFor`
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        // Unwraps the flexible envelope around the `GetBufferCollectionId` response.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c, // ordinal for `GetBufferCollectionId`
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1, // ordinal for `SetWeak`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9, // ordinal for `SetWeakOk`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac, // ordinal for `AttachNodeTracking`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_constraints(
        &self,
        mut payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionSetConstraintsRequest>(
            &mut payload,
            0x1fde0f19d650197b, // ordinal for `SetConstraints`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type WaitForAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionWaitForAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut {
        // Decodes a flexible result envelope: allocation info or domain `Error`.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<
                    BufferCollectionWaitForAllBuffersAllocatedResponse,
                    Error,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x62300344b61404e,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            BufferCollectionWaitForAllBuffersAllocatedResult,
        >(
            (),
            0x62300344b61404e, // ordinal for `WaitForAllBuffersAllocated`
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type CheckAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionCheckAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut {
        // Decodes a flexible result envelope: empty success or domain `Error`.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x35a5fe77ce939c10,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            BufferCollectionCheckAllBuffersAllocatedResult,
        >(
            (),
            0x35a5fe77ce939c10, // ordinal for `CheckAllBuffersAllocated`
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#attach_token(
        &self,
        mut payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionAttachTokenRequest>(
            &mut payload,
            0x46ac7d0008492982, // ordinal for `AttachToken`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#attach_lifetime_tracking(
        &self,
        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
            &mut payload,
            0x3ecb510113116dcf, // ordinal for `AttachLifetimeTracking`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
3884
/// Stream of events arriving on a `BufferCollection` client channel.
pub struct BufferCollectionEventStream {
    // Yields raw message buffers read from the underlying channel.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
3888
// The stream holds no self-referential data, so it can be moved after pinning.
impl std::marker::Unpin for BufferCollectionEventStream {}
3890
impl futures::stream::FusedStream for BufferCollectionEventStream {
    // The stream is terminated exactly when the underlying receiver is.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
3896
impl futures::Stream for BufferCollectionEventStream {
    type Item = Result<BufferCollectionEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // Pull the next raw buffer from the receiver, then decode it into an
        // event. `None` from the receiver means the channel closed, which ends
        // this stream too; receiver errors propagate via the `?`.
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionEvent::decode(buf))),
            None => std::task::Poll::Ready(None),
        }
    }
}
3913
/// Events delivered by the `BufferCollection` protocol. No known events are
/// defined here; flexible events with unrecognized ordinals are surfaced as
/// [`BufferCollectionEvent::_UnknownEvent`].
#[derive(Debug)]
pub enum BufferCollectionEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
3922
impl BufferCollectionEvent {
    /// Decodes a message buffer as a [`BufferCollectionEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<BufferCollectionEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events are unsolicited messages; their transaction id is always 0.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // Flexible messages with an unrecognized ordinal are tolerated and
            // surfaced as `_UnknownEvent` instead of failing the stream.
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(BufferCollectionEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            // A strict message with an unknown ordinal is a protocol error.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name:
                    <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
3943
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollection.
pub struct BufferCollectionRequestStream {
    // Shared server-side channel state; also handed out to control handles.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel closes or is shut down; the stream must not be
    // polled again after it reports termination.
    is_terminated: bool,
}
3949
// The stream holds no self-referential data, so it can be moved after pinning.
impl std::marker::Unpin for BufferCollectionRequestStream {}
3951
impl futures::stream::FusedStream for BufferCollectionRequestStream {
    // Reflects the `is_terminated` flag maintained by `poll_next`.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
3957
impl fidl::endpoints::RequestStream for BufferCollectionRequestStream {
    type Protocol = BufferCollectionMarker;
    type ControlHandle = BufferCollectionControlHandle;

    // Wraps a raw async channel in a fresh, unterminated request stream.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    // Creates a control handle that shares this stream's channel state.
    fn control_handle(&self) -> Self::ControlHandle {
        BufferCollectionControlHandle { inner: self.inner.clone() }
    }

    // Decomposes the stream into its shared state and termination flag,
    // the inverse of `from_inner`.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
3984
3985impl futures::Stream for BufferCollectionRequestStream {
3986 type Item = Result<BufferCollectionRequest, fidl::Error>;
3987
3988 fn poll_next(
3989 mut self: std::pin::Pin<&mut Self>,
3990 cx: &mut std::task::Context<'_>,
3991 ) -> std::task::Poll<Option<Self::Item>> {
3992 let this = &mut *self;
3993 if this.inner.check_shutdown(cx) {
3994 this.is_terminated = true;
3995 return std::task::Poll::Ready(None);
3996 }
3997 if this.is_terminated {
3998 panic!("polled BufferCollectionRequestStream after completion");
3999 }
4000 fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
4001 |bytes, handles| {
4002 match this.inner.channel().read_etc(cx, bytes, handles) {
4003 std::task::Poll::Ready(Ok(())) => {}
4004 std::task::Poll::Pending => return std::task::Poll::Pending,
4005 std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
4006 this.is_terminated = true;
4007 return std::task::Poll::Ready(None);
4008 }
4009 std::task::Poll::Ready(Err(e)) => {
4010 return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
4011 e.into(),
4012 ))));
4013 }
4014 }
4015
4016 // A message has been received from the channel
4017 let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
4018
4019 std::task::Poll::Ready(Some(match header.ordinal {
4020 0x11ac2555cf575b54 => {
4021 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4022 let mut req = fidl::new_empty!(
4023 fidl::encoding::EmptyPayload,
4024 fidl::encoding::DefaultFuchsiaResourceDialect
4025 );
4026 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4027 let control_handle =
4028 BufferCollectionControlHandle { inner: this.inner.clone() };
4029 Ok(BufferCollectionRequest::Sync {
4030 responder: BufferCollectionSyncResponder {
4031 control_handle: std::mem::ManuallyDrop::new(control_handle),
4032 tx_id: header.tx_id,
4033 },
4034 })
4035 }
4036 0x6a5cae7d6d6e04c6 => {
4037 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4038 let mut req = fidl::new_empty!(
4039 fidl::encoding::EmptyPayload,
4040 fidl::encoding::DefaultFuchsiaResourceDialect
4041 );
4042 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4043 let control_handle =
4044 BufferCollectionControlHandle { inner: this.inner.clone() };
4045 Ok(BufferCollectionRequest::Release { control_handle })
4046 }
4047 0xb41f1624f48c1e9 => {
4048 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4049 let mut req = fidl::new_empty!(
4050 NodeSetNameRequest,
4051 fidl::encoding::DefaultFuchsiaResourceDialect
4052 );
4053 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
4054 let control_handle =
4055 BufferCollectionControlHandle { inner: this.inner.clone() };
4056 Ok(BufferCollectionRequest::SetName { payload: req, control_handle })
4057 }
4058 0x5cde8914608d99b1 => {
4059 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4060 let mut req = fidl::new_empty!(
4061 NodeSetDebugClientInfoRequest,
4062 fidl::encoding::DefaultFuchsiaResourceDialect
4063 );
4064 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
4065 let control_handle =
4066 BufferCollectionControlHandle { inner: this.inner.clone() };
4067 Ok(BufferCollectionRequest::SetDebugClientInfo {
4068 payload: req,
4069 control_handle,
4070 })
4071 }
4072 0x716b0af13d5c0806 => {
4073 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4074 let mut req = fidl::new_empty!(
4075 NodeSetDebugTimeoutLogDeadlineRequest,
4076 fidl::encoding::DefaultFuchsiaResourceDialect
4077 );
4078 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
4079 let control_handle =
4080 BufferCollectionControlHandle { inner: this.inner.clone() };
4081 Ok(BufferCollectionRequest::SetDebugTimeoutLogDeadline {
4082 payload: req,
4083 control_handle,
4084 })
4085 }
4086 0x5209c77415b4dfad => {
4087 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4088 let mut req = fidl::new_empty!(
4089 fidl::encoding::EmptyPayload,
4090 fidl::encoding::DefaultFuchsiaResourceDialect
4091 );
4092 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4093 let control_handle =
4094 BufferCollectionControlHandle { inner: this.inner.clone() };
4095 Ok(BufferCollectionRequest::SetVerboseLogging { control_handle })
4096 }
4097 0x5b3d0e51614df053 => {
4098 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4099 let mut req = fidl::new_empty!(
4100 fidl::encoding::EmptyPayload,
4101 fidl::encoding::DefaultFuchsiaResourceDialect
4102 );
4103 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4104 let control_handle =
4105 BufferCollectionControlHandle { inner: this.inner.clone() };
4106 Ok(BufferCollectionRequest::GetNodeRef {
4107 responder: BufferCollectionGetNodeRefResponder {
4108 control_handle: std::mem::ManuallyDrop::new(control_handle),
4109 tx_id: header.tx_id,
4110 },
4111 })
4112 }
4113 0x3a58e00157e0825 => {
4114 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4115 let mut req = fidl::new_empty!(
4116 NodeIsAlternateForRequest,
4117 fidl::encoding::DefaultFuchsiaResourceDialect
4118 );
4119 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
4120 let control_handle =
4121 BufferCollectionControlHandle { inner: this.inner.clone() };
4122 Ok(BufferCollectionRequest::IsAlternateFor {
4123 payload: req,
4124 responder: BufferCollectionIsAlternateForResponder {
4125 control_handle: std::mem::ManuallyDrop::new(control_handle),
4126 tx_id: header.tx_id,
4127 },
4128 })
4129 }
4130 0x77d19a494b78ba8c => {
4131 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4132 let mut req = fidl::new_empty!(
4133 fidl::encoding::EmptyPayload,
4134 fidl::encoding::DefaultFuchsiaResourceDialect
4135 );
4136 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4137 let control_handle =
4138 BufferCollectionControlHandle { inner: this.inner.clone() };
4139 Ok(BufferCollectionRequest::GetBufferCollectionId {
4140 responder: BufferCollectionGetBufferCollectionIdResponder {
4141 control_handle: std::mem::ManuallyDrop::new(control_handle),
4142 tx_id: header.tx_id,
4143 },
4144 })
4145 }
4146 0x22dd3ea514eeffe1 => {
4147 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4148 let mut req = fidl::new_empty!(
4149 fidl::encoding::EmptyPayload,
4150 fidl::encoding::DefaultFuchsiaResourceDialect
4151 );
4152 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4153 let control_handle =
4154 BufferCollectionControlHandle { inner: this.inner.clone() };
4155 Ok(BufferCollectionRequest::SetWeak { control_handle })
4156 }
4157 0x38a44fc4d7724be9 => {
4158 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4159 let mut req = fidl::new_empty!(
4160 NodeSetWeakOkRequest,
4161 fidl::encoding::DefaultFuchsiaResourceDialect
4162 );
4163 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
4164 let control_handle =
4165 BufferCollectionControlHandle { inner: this.inner.clone() };
4166 Ok(BufferCollectionRequest::SetWeakOk { payload: req, control_handle })
4167 }
4168 0x3f22f2a293d3cdac => {
4169 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4170 let mut req = fidl::new_empty!(
4171 NodeAttachNodeTrackingRequest,
4172 fidl::encoding::DefaultFuchsiaResourceDialect
4173 );
4174 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
4175 let control_handle =
4176 BufferCollectionControlHandle { inner: this.inner.clone() };
4177 Ok(BufferCollectionRequest::AttachNodeTracking {
4178 payload: req,
4179 control_handle,
4180 })
4181 }
4182 0x1fde0f19d650197b => {
4183 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4184 let mut req = fidl::new_empty!(
4185 BufferCollectionSetConstraintsRequest,
4186 fidl::encoding::DefaultFuchsiaResourceDialect
4187 );
4188 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionSetConstraintsRequest>(&header, _body_bytes, handles, &mut req)?;
4189 let control_handle =
4190 BufferCollectionControlHandle { inner: this.inner.clone() };
4191 Ok(BufferCollectionRequest::SetConstraints { payload: req, control_handle })
4192 }
4193 0x62300344b61404e => {
4194 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4195 let mut req = fidl::new_empty!(
4196 fidl::encoding::EmptyPayload,
4197 fidl::encoding::DefaultFuchsiaResourceDialect
4198 );
4199 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4200 let control_handle =
4201 BufferCollectionControlHandle { inner: this.inner.clone() };
4202 Ok(BufferCollectionRequest::WaitForAllBuffersAllocated {
4203 responder: BufferCollectionWaitForAllBuffersAllocatedResponder {
4204 control_handle: std::mem::ManuallyDrop::new(control_handle),
4205 tx_id: header.tx_id,
4206 },
4207 })
4208 }
4209 0x35a5fe77ce939c10 => {
4210 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4211 let mut req = fidl::new_empty!(
4212 fidl::encoding::EmptyPayload,
4213 fidl::encoding::DefaultFuchsiaResourceDialect
4214 );
4215 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4216 let control_handle =
4217 BufferCollectionControlHandle { inner: this.inner.clone() };
4218 Ok(BufferCollectionRequest::CheckAllBuffersAllocated {
4219 responder: BufferCollectionCheckAllBuffersAllocatedResponder {
4220 control_handle: std::mem::ManuallyDrop::new(control_handle),
4221 tx_id: header.tx_id,
4222 },
4223 })
4224 }
4225 0x46ac7d0008492982 => {
4226 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4227 let mut req = fidl::new_empty!(
4228 BufferCollectionAttachTokenRequest,
4229 fidl::encoding::DefaultFuchsiaResourceDialect
4230 );
4231 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachTokenRequest>(&header, _body_bytes, handles, &mut req)?;
4232 let control_handle =
4233 BufferCollectionControlHandle { inner: this.inner.clone() };
4234 Ok(BufferCollectionRequest::AttachToken { payload: req, control_handle })
4235 }
4236 0x3ecb510113116dcf => {
4237 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4238 let mut req = fidl::new_empty!(
4239 BufferCollectionAttachLifetimeTrackingRequest,
4240 fidl::encoding::DefaultFuchsiaResourceDialect
4241 );
4242 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachLifetimeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
4243 let control_handle =
4244 BufferCollectionControlHandle { inner: this.inner.clone() };
4245 Ok(BufferCollectionRequest::AttachLifetimeTracking {
4246 payload: req,
4247 control_handle,
4248 })
4249 }
4250 _ if header.tx_id == 0
4251 && header
4252 .dynamic_flags()
4253 .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
4254 {
4255 Ok(BufferCollectionRequest::_UnknownMethod {
4256 ordinal: header.ordinal,
4257 control_handle: BufferCollectionControlHandle {
4258 inner: this.inner.clone(),
4259 },
4260 method_type: fidl::MethodType::OneWay,
4261 })
4262 }
4263 _ if header
4264 .dynamic_flags()
4265 .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
4266 {
4267 this.inner.send_framework_err(
4268 fidl::encoding::FrameworkErr::UnknownMethod,
4269 header.tx_id,
4270 header.ordinal,
4271 header.dynamic_flags(),
4272 (bytes, handles),
4273 )?;
4274 Ok(BufferCollectionRequest::_UnknownMethod {
4275 ordinal: header.ordinal,
4276 control_handle: BufferCollectionControlHandle {
4277 inner: this.inner.clone(),
4278 },
4279 method_type: fidl::MethodType::TwoWay,
4280 })
4281 }
4282 _ => Err(fidl::Error::UnknownOrdinal {
4283 ordinal: header.ordinal,
4284 protocol_name:
4285 <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
4286 }),
4287 }))
4288 },
4289 )
4290 }
4291}
4292
4293/// [`fuchsia.sysmem2/BufferCollection`] is a connection directly from a
4294/// participant to sysmem re. a buffer collection; often the buffer collection
4295/// is shared with other participants which have their own `BufferCollection`
4296/// client end(s) associated with the same buffer collection. In other words,
4297/// an instance of the `BufferCollection` interface is a view of a buffer
4298/// collection, not the buffer collection itself.
4299///
4300/// The `BufferCollection` connection exists to facilitate async indication of
4301/// when the buffer collection has been populated with buffers.
4302///
4303/// Also, the channel's closure by the sysmem server is an indication to the
4304/// client that the client should close all VMO handles that were obtained from
4305/// the `BufferCollection` ASAP.
4306///
4307/// Some buffer collections can use enough memory that it can be worth avoiding
4308/// allocation overlap (in time) using
4309/// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] so that the
4310/// initiator can tell when enough buffers of the buffer collection have been
4311/// fully deallocated prior to the initiator allocating a new buffer collection.
4312///
4313/// Epitaphs are not used in this protocol.
4314#[derive(Debug)]
4315pub enum BufferCollectionRequest {
4316 /// Ensure that previous messages have been received server side. This is
4317 /// particularly useful after previous messages that created new tokens,
4318 /// because a token must be known to the sysmem server before sending the
4319 /// token to another participant.
4320 ///
4321 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
4322 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
4323 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
4324 /// to mitigate the possibility of a hostile/fake
4325 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
4326 /// Another way is to pass the token to
4327 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
4328 /// the token as part of exchanging it for a
4329 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
4330 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
4331 /// of stalling.
4332 ///
4333 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
4334 /// and then starting and completing a `Sync`, it's then safe to send the
4335 /// `BufferCollectionToken` client ends to other participants knowing the
4336 /// server will recognize the tokens when they're sent by the other
4337 /// participants to sysmem in a
4338 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
4339 /// efficient way to create tokens while avoiding unnecessary round trips.
4340 ///
4341 /// Other options include waiting for each
4342 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
4343 /// individually (using separate call to `Sync` after each), or calling
4344 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
4345 /// converted to a `BufferCollection` via
4346 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
4347 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
4348 /// the sync step and can create multiple tokens at once.
4349 Sync { responder: BufferCollectionSyncResponder },
4350 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
4351 ///
4352 /// Normally a participant will convert a `BufferCollectionToken` into a
4353 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
4354 /// `Release` via the token (and then close the channel immediately or
4355 /// shortly later in response to server closing the server end), which
4356 /// avoids causing buffer collection failure. Without a prior `Release`,
4357 /// closing the `BufferCollectionToken` client end will cause buffer
4358 /// collection failure.
4359 ///
4360 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
4361 ///
4362 /// By default the server handles unexpected closure of a
4363 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
4364 /// first) by failing the buffer collection. Partly this is to expedite
4365 /// closing VMO handles to reclaim memory when any participant fails. If a
4366 /// participant would like to cleanly close a `BufferCollection` without
4367 /// causing buffer collection failure, the participant can send `Release`
4368 /// before closing the `BufferCollection` client end. The `Release` can
4369 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
4370 /// buffer collection won't require constraints from this node in order to
4371 /// allocate. If after `SetConstraints`, the constraints are retained and
4372 /// aggregated, despite the lack of `BufferCollection` connection at the
4373 /// time of constraints aggregation.
4374 ///
4375 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
4376 ///
4377 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
4378 /// end (without `Release` first) will trigger failure of the buffer
4379 /// collection. To close a `BufferCollectionTokenGroup` channel without
4380 /// failing the buffer collection, ensure that AllChildrenPresent() has been
4381 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
4382 /// client end.
4383 ///
4384 /// If `Release` occurs before
4385 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
4386 /// buffer collection will fail (triggered by reception of `Release` without
4387 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
4388 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
4389 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
4390 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
4391 /// close requires `AllChildrenPresent` (if not already sent), then
4392 /// `Release`, then close client end.
4393 ///
4394 /// If `Release` occurs after `AllChildrenPresent`, the children and all
4395 /// their constraints remain intact (just as they would if the
4396 /// `BufferCollectionTokenGroup` channel had remained open), and the client
4397 /// end close doesn't trigger buffer collection failure.
4398 ///
4399 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
4400 ///
4401 /// For brevity, the per-channel-protocol paragraphs above ignore the
4402 /// separate failure domain created by
4403 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
4404 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
4405 /// unexpectedly closes (without `Release` first) and that client end is
4406 /// under a failure domain, instead of failing the whole buffer collection,
4407 /// the failure domain is failed, but the buffer collection itself is
4408 /// isolated from failure of the failure domain. Such failure domains can be
4409 /// nested, in which case only the inner-most failure domain in which the
4410 /// `Node` resides fails.
4411 Release { control_handle: BufferCollectionControlHandle },
4412 /// Set a name for VMOs in this buffer collection.
4413 ///
4414 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
4415 /// will be truncated to fit. The name of the vmo will be suffixed with the
4416 /// buffer index within the collection (if the suffix fits within
4417 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
4418 /// listed in the inspect data.
4419 ///
4420 /// The name only affects VMOs allocated after the name is set; this call
4421 /// does not rename existing VMOs. If multiple clients set different names
4422 /// then the larger priority value will win. Setting a new name with the
4423 /// same priority as a prior name doesn't change the name.
4424 ///
4425 /// All table fields are currently required.
4426 ///
4427 /// + request `priority` The name is only set if this is the first `SetName`
4428 /// or if `priority` is greater than any previous `priority` value in
4429 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
4430 /// + request `name` The name for VMOs created under this buffer collection.
4431 SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionControlHandle },
4432 /// Set information about the current client that can be used by sysmem to
4433 /// help diagnose leaking memory and allocation stalls waiting for a
4434 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
4435 ///
4436 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
4437 /// `Node`(s) derived from this `Node`, unless overridden by
4438 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
4439 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
4440 ///
4441 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
4442 /// `Allocator` is the most efficient way to ensure that all
4443 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
4444 /// set, and is also more efficient than separately sending the same debug
4445 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
4446 /// created [`fuchsia.sysmem2/Node`].
4447 ///
4448 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
4449 /// indicate which client is closing their channel first, leading to subtree
4450 /// failure (which can be normal if the purpose of the subtree is over, but
4451 /// if happening earlier than expected, the client-channel-specific name can
4452 /// help diagnose where the failure is first coming from, from sysmem's
4453 /// point of view).
4454 ///
4455 /// All table fields are currently required.
4456 ///
4457 /// + request `name` This can be an arbitrary string, but the current
4458 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
4459 /// + request `id` This can be an arbitrary id, but the current process ID
4460 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
4461 SetDebugClientInfo {
4462 payload: NodeSetDebugClientInfoRequest,
4463 control_handle: BufferCollectionControlHandle,
4464 },
4465 /// Sysmem logs a warning if sysmem hasn't seen
4466 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
4467 /// within 5 seconds after creation of a new collection.
4468 ///
4469 /// Clients can call this method to change when the log is printed. If
4470 /// multiple clients set the deadline, it's unspecified which deadline will
4471 /// take effect.
4472 ///
4473 /// In most cases the default works well.
4474 ///
4475 /// All table fields are currently required.
4476 ///
4477 /// + request `deadline` The time at which sysmem will start trying to log
4478 /// the warning, unless all constraints are with sysmem by then.
4479 SetDebugTimeoutLogDeadline {
4480 payload: NodeSetDebugTimeoutLogDeadlineRequest,
4481 control_handle: BufferCollectionControlHandle,
4482 },
4483 /// This enables verbose logging for the buffer collection.
4484 ///
4485 /// Verbose logging includes constraints set via
4486 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
4487 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
4488 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
4489 /// the tree of `Node`(s).
4490 ///
4491 /// Normally sysmem prints only a single line complaint when aggregation
4492 /// fails, with just the specific detailed reason that aggregation failed,
4493 /// with little surrounding context. While this is often enough to diagnose
4494 /// a problem if only a small change was made and everything was working
4495 /// before the small change, it's often not particularly helpful for getting
4496 /// a new buffer collection to work for the first time. Especially with
4497 /// more complex trees of nodes, involving things like
4498 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
4499 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
4500 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
4501 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
4502 /// looks like and why it's failing a logical allocation, or why a tree or
4503 /// subtree is failing sooner than expected.
4504 ///
4505 /// The intent of the extra logging is to be acceptable from a performance
4506 /// point of view, under the assumption that verbose logging is only enabled
4507 /// on a low number of buffer collections. If we're not tracking down a bug,
4508 /// we shouldn't send this message.
4509 SetVerboseLogging { control_handle: BufferCollectionControlHandle },
4510 /// This gets a handle that can be used as a parameter to
4511 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
4512 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
4513 /// client obtained this handle from this `Node`.
4514 ///
4515 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
4516 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
4517 /// despite the two calls typically being on different channels.
4518 ///
4519 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
4520 ///
4521 /// All table fields are currently required.
4522 ///
4523 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
4524 /// different `Node` channel, to prove that the client obtained the handle
4525 /// from this `Node`.
4526 GetNodeRef { responder: BufferCollectionGetNodeRefResponder },
4527 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
4528 /// rooted at a different child token of a common parent
4529 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
4530 /// passed-in `node_ref`.
4531 ///
4532 /// This call is for assisting with admission control de-duplication, and
4533 /// with debugging.
4534 ///
4535 /// The `node_ref` must be obtained using
4536 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
4537 ///
4538 /// The `node_ref` can be a duplicated handle; it's not necessary to call
4539 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
4540 ///
4541 /// If a calling token may not actually be a valid token at all due to a
4542 /// potentially hostile/untrusted provider of the token, call
4543 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
4544 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
4545 /// never responds due to a calling token not being a real token (not really
4546 /// talking to sysmem). Another option is to call
4547 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
4548 /// which also validates the token along with converting it to a
4549 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
4550 ///
4551 /// All table fields are currently required.
4552 ///
4553 /// - response `is_alternate`
4554 /// - true: The first parent node in common between the calling node and
4555 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
4556 /// that the calling `Node` and the `node_ref` `Node` will not have both
4557 /// their constraints apply - rather sysmem will choose one or the other
4558 /// of the constraints - never both. This is because only one child of
4559 /// a `BufferCollectionTokenGroup` is selected during logical
4560 /// allocation, with only that one child's subtree contributing to
4561 /// constraints aggregation.
4562 /// - false: The first parent node in common between the calling `Node`
4563 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
4564 /// Currently, this means the first parent node in common is a
4565 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
4566 /// `Release`ed). This means that the calling `Node` and the `node_ref`
4567 /// `Node` may have both their constraints apply during constraints
4568 /// aggregation of the logical allocation, if both `Node`(s) are
4569 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
4570 /// this case, there is no `BufferCollectionTokenGroup` that will
4571 /// directly prevent the two `Node`(s) from both being selected and
4572 /// their constraints both aggregated, but even when false, one or both
4573 /// `Node`(s) may still be eliminated from consideration if one or both
4574 /// `Node`(s) has a direct or indirect parent
4575 /// `BufferCollectionTokenGroup` which selects a child subtree other
4576 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
4577 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
4578 /// associated with the same buffer collection as the calling `Node`.
4579 /// Another reason for this error is if the `node_ref` is an
4580 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
4581 /// a real `node_ref` obtained from `GetNodeRef`.
4582 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
4583 /// `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
4584 /// the needed rights expected on a real `node_ref`.
4585 /// * No other failing status codes are returned by this call. However,
4586 /// sysmem may add additional codes in future, so the client should have
4587 /// sensible default handling for any failing status code.
4588 IsAlternateFor {
4589 payload: NodeIsAlternateForRequest,
4590 responder: BufferCollectionIsAlternateForResponder,
4591 },
4592 /// Get the buffer collection ID. This ID is also available from
4593 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
4594 /// within the collection).
4595 ///
4596 /// This call is mainly useful in situations where we can't convey a
4597 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
4598 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
4599 /// handle, which can be joined back up with a `BufferCollection` client end
4600 /// that was created via a different path. Prefer to convey a
4601 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
4602 ///
4603 /// Trusting a `buffer_collection_id` value from a source other than sysmem
4604 /// is analogous to trusting a koid value from a source other than zircon.
4605 /// Both should be avoided unless really necessary, and both require
4606 /// caution. In some situations it may be reasonable to refer to a
4607 /// pre-established `BufferCollection` by `buffer_collection_id` via a
4608 /// protocol for efficiency reasons, but an incoming value purporting to be
4609 /// a `buffer_collection_id` is not sufficient alone to justify granting the
4610 /// sender of the `buffer_collection_id` any capability. The sender must
4611 /// first prove to a receiver that the sender has/had a VMO or has/had a
4612 /// `BufferCollectionToken` to the same collection by sending a handle that
4613 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
4614 /// `buffer_collection_id` value. The receiver should take care to avoid
4615 /// assuming that a sender had a `BufferCollectionToken` in cases where the
4616 /// sender has only proven that the sender had a VMO.
4617 ///
4618 /// - response `buffer_collection_id` This ID is unique per buffer
4619 /// collection per boot. Each buffer is uniquely identified by the
4620 /// `buffer_collection_id` and `buffer_index` together.
4621 GetBufferCollectionId { responder: BufferCollectionGetBufferCollectionIdResponder },
4622 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
4623 /// created after this message to weak, which means that a client's `Node`
4624 /// client end (or a child created after this message) is not alone
4625 /// sufficient to keep allocated VMOs alive.
4626 ///
4627 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
4628 /// `close_weak_asap`.
4629 ///
4630 /// This message is only permitted before the `Node` becomes ready for
4631 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
4632 /// * `BufferCollectionToken`: any time
4633 /// * `BufferCollection`: before `SetConstraints`
4634 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
4635 ///
4636 /// Currently, no conversion from strong `Node` to weak `Node` after ready
4637 /// for allocation is provided, but a client can simulate that by creating
4638 /// an additional `Node` before allocation and setting that additional
4639 /// `Node` to weak, and then potentially at some point later sending
4640 /// `Release` and closing the client end of the client's strong `Node`, but
4641 /// keeping the client's weak `Node`.
4642 ///
4643 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
4644 /// collection failure (all `Node` client end(s) will see
4645 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
4646 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
4647 /// this situation until all `Node`(s) are ready for allocation. For initial
4648 /// allocation to succeed, at least one strong `Node` is required to exist
4649 /// at allocation time, but after that client receives VMO handles, that
4650 /// client can `BufferCollection.Release` and close the client end without
4651 /// causing this type of failure.
4652 ///
4653 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
4654 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
4655 /// separately as appropriate.
4656 SetWeak { control_handle: BufferCollectionControlHandle },
4657 /// This indicates to sysmem that the client is prepared to pay attention to
4658 /// `close_weak_asap`.
4659 ///
4660 /// If sent, this message must be before
4661 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
4662 ///
4663 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
4664 /// send this message before `WaitForAllBuffersAllocated`, or a parent
4665 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
4666 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
4667 /// trigger buffer collection failure.
4668 ///
4669 /// This message is necessary because weak sysmem VMOs have not always been
4670 /// a thing, so older clients are not aware of the need to pay attention to
4671 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
4672 /// sysmem weak VMO handles asap. By having this message and requiring
4673 /// participants to indicate their acceptance of this aspect of the overall
4674 /// protocol, we avoid situations where an older client is delivered a weak
4675 /// VMO without any way for sysmem to get that VMO to close quickly later
4676 /// (and on a per-buffer basis).
4677 ///
4678 /// A participant that doesn't handle `close_weak_asap` and also doesn't
4679 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
4680 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
4681 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
4682 /// same participant has a child/delegate which does retrieve VMOs, that
4683 /// child/delegate will need to send `SetWeakOk` before
4684 /// `WaitForAllBuffersAllocated`.
4685 ///
4686 /// + request `for_child_nodes_also` If present and true, this means direct
4687 /// child nodes of this node created after this message plus all
4688 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
4689 /// those nodes. Any child node of this node that was created before this
4690 /// message is not included. This setting is "sticky" in the sense that a
4691 /// subsequent `SetWeakOk` without this bool set to true does not reset
4692 /// the server-side bool. If this creates a problem for a participant, a
4693 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
4694 /// tokens instead, as appropriate. A participant should only set
4695 /// `for_child_nodes_also` true if the participant can really promise to
4696 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
4697 /// weak VMO handles held by participants holding the corresponding child
4698 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
4699 /// which are using sysmem(1) can be weak, despite the clients of those
4700 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
4701 /// direct way to find out about `close_weak_asap`. This only applies to
4702 /// descendents of this `Node` which are using sysmem(1), not to this
4703 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
4704 /// token, which will fail allocation unless an ancestor of this `Node`
4705 /// specified `for_child_nodes_also` true.
4706 SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionControlHandle },
4707 /// The server_end will be closed after this `Node` and any child nodes have
4708 /// have released their buffer counts, making those counts available for
4709 /// reservation by a different `Node` via
4710 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
4711 ///
4712 /// The `Node` buffer counts may not be released until the entire tree of
4713 /// `Node`(s) is closed or failed, because
4714 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
4715 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
4716 /// `Node` buffer counts remain reserved until the orphaned node is later
4717 /// cleaned up.
4718 ///
4719 /// If the `Node` exceeds a fairly large number of attached eventpair server
4720 /// ends, a log message will indicate this and the `Node` (and the
4721 /// appropriate) sub-tree will fail.
4722 ///
4723 /// The `server_end` will remain open when
4724 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
4725 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
4726 /// [`fuchsia.sysmem2/BufferCollection`].
4727 ///
4728 /// This message can also be used with a
4729 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
4730 AttachNodeTracking {
4731 payload: NodeAttachNodeTrackingRequest,
4732 control_handle: BufferCollectionControlHandle,
4733 },
4734 /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
4735 /// collection.
4736 ///
4737 /// A participant may only call
4738 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
4739 /// [`fuchsia.sysmem2/BufferCollection`].
4740 ///
4741 /// For buffer allocation to be attempted, all holders of a
4742 /// `BufferCollection` client end need to call `SetConstraints` before
4743 /// sysmem will attempt to allocate buffers.
4744 ///
4745 /// + request `constraints` These are the constraints on the buffer
4746 /// collection imposed by the sending client/participant. The
4747 /// `constraints` field is not required to be set. If not set, the client
4748 /// is not setting any actual constraints, but is indicating that the
4749 /// client has no constraints to set. A client that doesn't set the
4750 /// `constraints` field won't receive any VMO handles, but can still find
4751 /// out how many buffers were allocated and can still refer to buffers by
4752 /// their `buffer_index`.
4753 SetConstraints {
4754 payload: BufferCollectionSetConstraintsRequest,
4755 control_handle: BufferCollectionControlHandle,
4756 },
4757 /// Wait until all buffers are allocated.
4758 ///
4759 /// This FIDL call completes when buffers have been allocated, or completes
4760 /// with some failure detail if allocation has been attempted but failed.
4761 ///
4762 /// The following must occur before buffers will be allocated:
4763 /// * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
4764 /// collection must be turned in via `BindSharedCollection` to get a
4765 /// [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
4766 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
4767 /// or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
4768 /// to them.
4769 /// * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
4770 /// must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
4771 /// sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
4772 /// sent to them.
4773 ///
4774 /// - result `buffer_collection_info` The VMO handles and other related
4775 /// info.
4776 /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
4777 /// cannot be fulfilled due to resource exhaustion.
4778 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION`] The request is
4779 /// malformed.
4780 /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY`] The
4781 /// request is valid but cannot be satisfied, perhaps due to hardware
4782 /// limitations. This can happen if participants have incompatible
4783 /// constraints (empty intersection, roughly speaking). See the log for
4784 /// more info. In cases where a participant could potentially be treated
4785 /// as optional, see [`BufferCollectionTokenGroup`]. When using
4786 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
4787 /// error code if there aren't enough buffers in the pre-existing
4788 /// collection to satisfy the constraints set on the attached token and
4789 /// any sub-tree of tokens derived from the attached token.
4790 WaitForAllBuffersAllocated { responder: BufferCollectionWaitForAllBuffersAllocatedResponder },
4791 /// Checks whether all the buffers have been allocated, in a polling
4792 /// fashion.
4793 ///
4794 /// * If the buffer collection has been allocated, returns success.
4795 /// * If the buffer collection failed allocation, returns the same
4796 /// [`fuchsia.sysmem2/Error`] as
4797 /// [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
4798 /// return.
4799 /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
4800 /// attempted allocation yet. This means that WaitForAllBuffersAllocated
4801 /// would not respond quickly.
4802 CheckAllBuffersAllocated { responder: BufferCollectionCheckAllBuffersAllocatedResponder },
4803 /// Create a new token to add a new participant to an existing logical
4804 /// buffer collection, if the existing collection's buffer counts,
4805 /// constraints, and participants allow.
4806 ///
4807 /// This can be useful in replacing a failed participant, and/or in
4808 /// adding/re-adding a participant after buffers have already been
4809 /// allocated.
4810 ///
4811 /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
4812 /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
4813 /// goes through the normal procedure of setting constraints or closing
4814 /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
4815 /// clients' point of view, despite the possibility that all the buffers
4816 /// were actually allocated previously. This process is called "logical
4817 /// allocation". Most instances of "allocation" in docs for other messages
4818 /// can also be read as "allocation or logical allocation" while remaining
4819 /// valid, but we just say "allocation" in most places for brevity/clarity
4820 /// of explanation, with the details of "logical allocation" left for the
4821 /// docs here on `AttachToken`.
4822 ///
4823 /// Failure of an attached `Node` does not propagate to the parent of the
4824 /// attached `Node`. More generally, failure of a child `Node` is blocked
4825 /// from reaching its parent `Node` if the child is attached, or if the
4826 /// child is dispensable and the failure occurred after logical allocation
4827 /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
4828 ///
4829 /// A participant may in some scenarios choose to initially use a
4830 /// dispensable token for a given instance of a delegate participant, and
4831 /// then later if the first instance of that delegate participant fails, a
4832 /// new second instance of that delegate participant my be given a token
4833 /// created with `AttachToken`.
4834 ///
4835 /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
4836 /// client end, the token acts like any other token. The client can
4837 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
4838 /// and can send the token to a different process/participant. The
4839 /// `BufferCollectionToken` `Node` should be converted to a
4840 /// `BufferCollection` `Node` as normal by sending
4841 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
4842 /// without causing subtree failure by sending
4843 /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
4844 /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
4845 /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
4846 /// the `BufferCollection`.
4847 ///
4848 /// Within the subtree, a success result from
4849 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
4850 /// the subtree participants' constraints were satisfiable using the
4851 /// already-existing buffer collection, the already-established
4852 /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
4853 /// constraints, and the already-existing other participants (already added
4854 /// via successful logical allocation) and their specified buffer counts in
4855 /// their constraints. A failure result means the new participants'
4856 /// constraints cannot be satisfied using the existing buffer collection and
4857 /// its already-added participants. Creating a new collection instead may
4858 /// allow all participants' constraints to be satisfied, assuming
4859 /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
4860 /// used.
4861 ///
4862 /// A token created with `AttachToken` performs constraints aggregation with
4863 /// all constraints currently in effect on the buffer collection, plus the
4864 /// attached token under consideration plus child tokens under the attached
4865 /// token which are not themselves an attached token or under such a token.
4866 /// Further subtrees under this subtree are considered for logical
4867 /// allocation only after this subtree has completed logical allocation.
4868 ///
4869 /// Assignment of existing buffers to participants'
4870 /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
4871 /// etc is first-come first-served, but a child can't logically allocate
4872 /// before all its parents have sent `SetConstraints`.
4873 ///
4874 /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
4875 /// in contrast to `AttachToken`, has the created token `Node` + child
4876 /// `Node`(s) (in the created subtree but not in any subtree under this
4877 /// subtree) participate in constraints aggregation along with its parent
4878 /// during the parent's allocation or logical allocation.
4879 ///
4880 /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
4881 /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
4882 /// sysmem before the new token can be passed to `BindSharedCollection`. The
4883 /// `Sync` of the new token can be accomplished with
4884 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
4885 /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
4886 /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
4887 /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
4888 /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
4889 /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
4890 /// created token, to also sync those additional tokens to sysmem using a
4891 /// single round-trip.
4892 ///
4893 /// All table fields are currently required.
4894 ///
4895 /// + request `rights_attentuation_mask` This allows attenuating the VMO
4896 /// rights of the subtree. These values for `rights_attenuation_mask`
4897 /// result in no attenuation (note that 0 is not on this list):
4898 /// + ZX_RIGHT_SAME_RIGHTS (preferred)
4899 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
4900 /// + request `token_request` The server end of the `BufferCollectionToken`
4901 /// channel. The client retains the client end.
4902 AttachToken {
4903 payload: BufferCollectionAttachTokenRequest,
4904 control_handle: BufferCollectionControlHandle,
4905 },
4906 /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
4907 /// buffers have been allocated and only the specified number of buffers (or
4908 /// fewer) remain in the buffer collection.
4909 ///
4910 /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
4911 /// client to wait until an old buffer collection is fully or mostly
4912 /// deallocated before attempting allocation of a new buffer collection. The
4913 /// eventpair is only signalled when the buffers of this collection have
4914 /// been fully deallocated (not just un-referenced by clients, but all the
4915 /// memory consumed by those buffers has been fully reclaimed/recycled), or
4916 /// when allocation or logical allocation fails for the tree or subtree
4917 /// including this [`fuchsia.sysmem2/BufferCollection`].
4918 ///
4919 /// The eventpair won't be signalled until allocation or logical allocation
4920 /// has completed; until then, the collection's current buffer count is
4921 /// ignored.
4922 ///
4923 /// If logical allocation fails for an attached subtree (using
4924 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
4925 /// eventpair will close during that failure regardless of the number of
4926 /// buffers potenitally allocated in the overall buffer collection. This is
4927 /// for logical allocation consistency with normal allocation.
4928 ///
4929 /// The lifetime signalled by this event includes asynchronous cleanup of
4930 /// allocated buffers, and this asynchronous cleanup cannot occur until all
4931 /// holders of VMO handles to the buffers have closed those VMO handles.
4932 /// Therefore, clients should take care not to become blocked forever
4933 /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
4934 /// participants using the logical buffer collection (including the waiter
4935 /// itself) are less trusted, less reliable, or potentially blocked by the
4936 /// wait itself. Waiting asynchronously is recommended. Setting a deadline
4937 /// for the client wait may be prudent, depending on details of how the
4938 /// collection and/or its VMOs are used or shared. Failure to allocate a
4939 /// new/replacement buffer collection is better than getting stuck forever.
4940 ///
4941 /// The sysmem server itself intentionally does not perform any waiting on
4942 /// already-failed collections' VMOs to finish cleaning up before attempting
4943 /// a new allocation, and the sysmem server intentionally doesn't retry
4944 /// allocation if a new allocation fails due to out of memory, even if that
4945 /// failure is potentially due to continued existence of an old collection's
4946 /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
4947 /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
4948 /// as long as the waiting client is careful to not create a deadlock.
4949 ///
4950 /// Continued existence of old collections that are still cleaning up is not
4951 /// the only reason that a new allocation may fail due to insufficient
4952 /// memory, even if the new allocation is allocating physically contiguous
4953 /// buffers. Overall system memory pressure can also be the cause of failure
4954 /// to allocate a new collection. See also
4955 /// [`fuchsia.memorypressure/Provider`].
4956 ///
4957 /// `AttachLifetimeTracking` is meant to be compatible with other protocols
4958 /// with a similar `AttachLifetimeTracking` message; duplicates of the same
4959 /// `eventpair` handle (server end) can be sent via more than one
4960 /// `AttachLifetimeTracking` message to different protocols, and the
4961 /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
4962 /// the conditions are met (all holders of duplicates have closed their
4963 /// server end handle(s)). Also, thanks to how eventpair endponts work, the
4964 /// client end can (also) be duplicated without preventing the
4965 /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
4966 ///
4967 /// The server intentionally doesn't "trust" any signals set on the
4968 /// `server_end`. This mechanism intentionally uses only
4969 /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
4970 /// "early", and is only set when all handles to the server end eventpair
4971 /// are closed. No meaning is associated with any of the other signals, and
4972 /// clients should ignore any other signal bits on either end of the
4973 /// `eventpair`.
4974 ///
4975 /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
4976 /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
4977 /// transfer without causing `BufferCollection` channel failure).
4978 ///
4979 /// All table fields are currently required.
4980 ///
4981 /// + request `server_end` This eventpair handle will be closed by the
4982 /// sysmem server when buffers have been allocated initially and the
4983 /// number of buffers is then less than or equal to `buffers_remaining`.
4984 /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
4985 /// fewer) buffers to be fully deallocated. A number greater than zero can
4986 /// be useful in situations where a known number of buffers are
4987 /// intentionally not closed so that the data can continue to be used,
4988 /// such as for keeping the last available video frame displayed in the UI
4989 /// even if the video stream was using protected output buffers. It's
4990 /// outside the scope of the `BufferCollection` interface (at least for
4991 /// now) to determine how many buffers may be held without closing, but
4992 /// it'll typically be in the range 0-2.
4993 AttachLifetimeTracking {
4994 payload: BufferCollectionAttachLifetimeTrackingRequest,
4995 control_handle: BufferCollectionControlHandle,
4996 },
4997 /// An interaction was received which does not match any known method.
4998 #[non_exhaustive]
4999 _UnknownMethod {
5000 /// Ordinal of the method that was called.
5001 ordinal: u64,
5002 control_handle: BufferCollectionControlHandle,
5003 method_type: fidl::MethodType,
5004 },
5005}
5006
5007impl BufferCollectionRequest {
5008 #[allow(irrefutable_let_patterns)]
5009 pub fn into_sync(self) -> Option<(BufferCollectionSyncResponder)> {
5010 if let BufferCollectionRequest::Sync { responder } = self {
5011 Some((responder))
5012 } else {
5013 None
5014 }
5015 }
5016
5017 #[allow(irrefutable_let_patterns)]
5018 pub fn into_release(self) -> Option<(BufferCollectionControlHandle)> {
5019 if let BufferCollectionRequest::Release { control_handle } = self {
5020 Some((control_handle))
5021 } else {
5022 None
5023 }
5024 }
5025
5026 #[allow(irrefutable_let_patterns)]
5027 pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionControlHandle)> {
5028 if let BufferCollectionRequest::SetName { payload, control_handle } = self {
5029 Some((payload, control_handle))
5030 } else {
5031 None
5032 }
5033 }
5034
5035 #[allow(irrefutable_let_patterns)]
5036 pub fn into_set_debug_client_info(
5037 self,
5038 ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionControlHandle)> {
5039 if let BufferCollectionRequest::SetDebugClientInfo { payload, control_handle } = self {
5040 Some((payload, control_handle))
5041 } else {
5042 None
5043 }
5044 }
5045
5046 #[allow(irrefutable_let_patterns)]
5047 pub fn into_set_debug_timeout_log_deadline(
5048 self,
5049 ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionControlHandle)> {
5050 if let BufferCollectionRequest::SetDebugTimeoutLogDeadline { payload, control_handle } =
5051 self
5052 {
5053 Some((payload, control_handle))
5054 } else {
5055 None
5056 }
5057 }
5058
5059 #[allow(irrefutable_let_patterns)]
5060 pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionControlHandle)> {
5061 if let BufferCollectionRequest::SetVerboseLogging { control_handle } = self {
5062 Some((control_handle))
5063 } else {
5064 None
5065 }
5066 }
5067
5068 #[allow(irrefutable_let_patterns)]
5069 pub fn into_get_node_ref(self) -> Option<(BufferCollectionGetNodeRefResponder)> {
5070 if let BufferCollectionRequest::GetNodeRef { responder } = self {
5071 Some((responder))
5072 } else {
5073 None
5074 }
5075 }
5076
5077 #[allow(irrefutable_let_patterns)]
5078 pub fn into_is_alternate_for(
5079 self,
5080 ) -> Option<(NodeIsAlternateForRequest, BufferCollectionIsAlternateForResponder)> {
5081 if let BufferCollectionRequest::IsAlternateFor { payload, responder } = self {
5082 Some((payload, responder))
5083 } else {
5084 None
5085 }
5086 }
5087
5088 #[allow(irrefutable_let_patterns)]
5089 pub fn into_get_buffer_collection_id(
5090 self,
5091 ) -> Option<(BufferCollectionGetBufferCollectionIdResponder)> {
5092 if let BufferCollectionRequest::GetBufferCollectionId { responder } = self {
5093 Some((responder))
5094 } else {
5095 None
5096 }
5097 }
5098
5099 #[allow(irrefutable_let_patterns)]
5100 pub fn into_set_weak(self) -> Option<(BufferCollectionControlHandle)> {
5101 if let BufferCollectionRequest::SetWeak { control_handle } = self {
5102 Some((control_handle))
5103 } else {
5104 None
5105 }
5106 }
5107
5108 #[allow(irrefutable_let_patterns)]
5109 pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, BufferCollectionControlHandle)> {
5110 if let BufferCollectionRequest::SetWeakOk { payload, control_handle } = self {
5111 Some((payload, control_handle))
5112 } else {
5113 None
5114 }
5115 }
5116
5117 #[allow(irrefutable_let_patterns)]
5118 pub fn into_attach_node_tracking(
5119 self,
5120 ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionControlHandle)> {
5121 if let BufferCollectionRequest::AttachNodeTracking { payload, control_handle } = self {
5122 Some((payload, control_handle))
5123 } else {
5124 None
5125 }
5126 }
5127
5128 #[allow(irrefutable_let_patterns)]
5129 pub fn into_set_constraints(
5130 self,
5131 ) -> Option<(BufferCollectionSetConstraintsRequest, BufferCollectionControlHandle)> {
5132 if let BufferCollectionRequest::SetConstraints { payload, control_handle } = self {
5133 Some((payload, control_handle))
5134 } else {
5135 None
5136 }
5137 }
5138
5139 #[allow(irrefutable_let_patterns)]
5140 pub fn into_wait_for_all_buffers_allocated(
5141 self,
5142 ) -> Option<(BufferCollectionWaitForAllBuffersAllocatedResponder)> {
5143 if let BufferCollectionRequest::WaitForAllBuffersAllocated { responder } = self {
5144 Some((responder))
5145 } else {
5146 None
5147 }
5148 }
5149
5150 #[allow(irrefutable_let_patterns)]
5151 pub fn into_check_all_buffers_allocated(
5152 self,
5153 ) -> Option<(BufferCollectionCheckAllBuffersAllocatedResponder)> {
5154 if let BufferCollectionRequest::CheckAllBuffersAllocated { responder } = self {
5155 Some((responder))
5156 } else {
5157 None
5158 }
5159 }
5160
5161 #[allow(irrefutable_let_patterns)]
5162 pub fn into_attach_token(
5163 self,
5164 ) -> Option<(BufferCollectionAttachTokenRequest, BufferCollectionControlHandle)> {
5165 if let BufferCollectionRequest::AttachToken { payload, control_handle } = self {
5166 Some((payload, control_handle))
5167 } else {
5168 None
5169 }
5170 }
5171
5172 #[allow(irrefutable_let_patterns)]
5173 pub fn into_attach_lifetime_tracking(
5174 self,
5175 ) -> Option<(BufferCollectionAttachLifetimeTrackingRequest, BufferCollectionControlHandle)>
5176 {
5177 if let BufferCollectionRequest::AttachLifetimeTracking { payload, control_handle } = self {
5178 Some((payload, control_handle))
5179 } else {
5180 None
5181 }
5182 }
5183
5184 /// Name of the method defined in FIDL
5185 pub fn method_name(&self) -> &'static str {
5186 match *self {
5187 BufferCollectionRequest::Sync { .. } => "sync",
5188 BufferCollectionRequest::Release { .. } => "release",
5189 BufferCollectionRequest::SetName { .. } => "set_name",
5190 BufferCollectionRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
5191 BufferCollectionRequest::SetDebugTimeoutLogDeadline { .. } => {
5192 "set_debug_timeout_log_deadline"
5193 }
5194 BufferCollectionRequest::SetVerboseLogging { .. } => "set_verbose_logging",
5195 BufferCollectionRequest::GetNodeRef { .. } => "get_node_ref",
5196 BufferCollectionRequest::IsAlternateFor { .. } => "is_alternate_for",
5197 BufferCollectionRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
5198 BufferCollectionRequest::SetWeak { .. } => "set_weak",
5199 BufferCollectionRequest::SetWeakOk { .. } => "set_weak_ok",
5200 BufferCollectionRequest::AttachNodeTracking { .. } => "attach_node_tracking",
5201 BufferCollectionRequest::SetConstraints { .. } => "set_constraints",
5202 BufferCollectionRequest::WaitForAllBuffersAllocated { .. } => {
5203 "wait_for_all_buffers_allocated"
5204 }
5205 BufferCollectionRequest::CheckAllBuffersAllocated { .. } => {
5206 "check_all_buffers_allocated"
5207 }
5208 BufferCollectionRequest::AttachToken { .. } => "attach_token",
5209 BufferCollectionRequest::AttachLifetimeTracking { .. } => "attach_lifetime_tracking",
5210 BufferCollectionRequest::_UnknownMethod {
5211 method_type: fidl::MethodType::OneWay,
5212 ..
5213 } => "unknown one-way method",
5214 BufferCollectionRequest::_UnknownMethod {
5215 method_type: fidl::MethodType::TwoWay,
5216 ..
5217 } => "unknown two-way method",
5218 }
5219 }
5220}
5221
/// Server-side control handle for a `BufferCollection` connection.
///
/// Cheap to clone: it wraps an `Arc` around the shared serving state, and all
/// operations on it (see the `ControlHandle` impl) delegate to that state.
#[derive(Debug, Clone)]
pub struct BufferCollectionControlHandle {
    // Shared serving state for the channel; shutdown/signal operations below
    // delegate to this.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
5226
impl fidl::endpoints::ControlHandle for BufferCollectionControlHandle {
    /// Shuts down the serving loop for this channel.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    /// Shuts down the channel, delivering `status` as an epitaph first.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    /// Reports whether the underlying channel is already closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    /// Returns a waiter that completes when the underlying channel closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    /// Clears/sets signal bits on the peer end of the channel.
    /// Only available on Fuchsia targets (`zx::Signals` requires the kernel).
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
5253
// Inherent methods for the control handle; fidlgen emitted none for this
// protocol, so the impl block is intentionally empty.
impl BufferCollectionControlHandle {}
5255
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionSyncResponder {
    /// Wrapped in `ManuallyDrop` so the `Drop` impl and `drop_without_shutdown`
    /// can each drop the handle exactly once while controlling whether the
    /// channel is shut down.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    /// Transaction id of the originating request; echoed back when the
    /// response is sent (see `send_raw`).
    tx_id: u32,
}
5262
/// Sets the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5273
impl fidl::endpoints::Responder for BufferCollectionSyncResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel, bypassing the
    /// shutdown-on-drop behavior of the `Drop` impl.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5288
5289impl BufferCollectionSyncResponder {
5290 /// Sends a response to the FIDL transaction.
5291 ///
5292 /// Sets the channel to shutdown if an error occurs.
5293 pub fn send(self) -> Result<(), fidl::Error> {
5294 let _result = self.send_raw();
5295 if _result.is_err() {
5296 self.control_handle.shutdown();
5297 }
5298 self.drop_without_shutdown();
5299 _result
5300 }
5301
5302 /// Similar to "send" but does not shutdown the channel if an error occurs.
5303 pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
5304 let _result = self.send_raw();
5305 self.drop_without_shutdown();
5306 _result
5307 }
5308
5309 fn send_raw(&self) -> Result<(), fidl::Error> {
5310 self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
5311 fidl::encoding::Flexible::new(()),
5312 self.tx_id,
5313 0x11ac2555cf575b54,
5314 fidl::encoding::DynamicFlags::FLEXIBLE,
5315 )
5316 }
5317}
5318
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionGetNodeRefResponder {
    /// Wrapped in `ManuallyDrop` so the `Drop` impl and `drop_without_shutdown`
    /// can each drop the handle exactly once while controlling whether the
    /// channel is shut down.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    /// Transaction id of the originating request; echoed back when the
    /// response is sent (see `send_raw`).
    tx_id: u32,
}
5325
/// Sets the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5336
impl fidl::endpoints::Responder for BufferCollectionGetNodeRefResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel, bypassing the
    /// shutdown-on-drop behavior of the `Drop` impl.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5351
5352impl BufferCollectionGetNodeRefResponder {
5353 /// Sends a response to the FIDL transaction.
5354 ///
5355 /// Sets the channel to shutdown if an error occurs.
5356 pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5357 let _result = self.send_raw(payload);
5358 if _result.is_err() {
5359 self.control_handle.shutdown();
5360 }
5361 self.drop_without_shutdown();
5362 _result
5363 }
5364
5365 /// Similar to "send" but does not shutdown the channel if an error occurs.
5366 pub fn send_no_shutdown_on_err(
5367 self,
5368 mut payload: NodeGetNodeRefResponse,
5369 ) -> Result<(), fidl::Error> {
5370 let _result = self.send_raw(payload);
5371 self.drop_without_shutdown();
5372 _result
5373 }
5374
5375 fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5376 self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
5377 fidl::encoding::Flexible::new(&mut payload),
5378 self.tx_id,
5379 0x5b3d0e51614df053,
5380 fidl::encoding::DynamicFlags::FLEXIBLE,
5381 )
5382 }
5383}
5384
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionIsAlternateForResponder {
    // `ManuallyDrop` lets `drop_without_shutdown` release the handle without
    // running this type's `Drop` impl (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request; echoed back when the response is sent.
    tx_id: u32,
}
5391
/// Set the channel to be shut down (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5402
impl fidl::endpoints::Responder for BufferCollectionIsAlternateForResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Borrows the control handle of the channel this response will go out on.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without sending a response and without shutting
    /// down the channel (bypasses the `Drop` impl's shutdown).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5417
5418impl BufferCollectionIsAlternateForResponder {
5419 /// Sends a response to the FIDL transaction.
5420 ///
5421 /// Sets the channel to shutdown if an error occurs.
5422 pub fn send(
5423 self,
5424 mut result: Result<&NodeIsAlternateForResponse, Error>,
5425 ) -> Result<(), fidl::Error> {
5426 let _result = self.send_raw(result);
5427 if _result.is_err() {
5428 self.control_handle.shutdown();
5429 }
5430 self.drop_without_shutdown();
5431 _result
5432 }
5433
5434 /// Similar to "send" but does not shutdown the channel if an error occurs.
5435 pub fn send_no_shutdown_on_err(
5436 self,
5437 mut result: Result<&NodeIsAlternateForResponse, Error>,
5438 ) -> Result<(), fidl::Error> {
5439 let _result = self.send_raw(result);
5440 self.drop_without_shutdown();
5441 _result
5442 }
5443
5444 fn send_raw(
5445 &self,
5446 mut result: Result<&NodeIsAlternateForResponse, Error>,
5447 ) -> Result<(), fidl::Error> {
5448 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5449 NodeIsAlternateForResponse,
5450 Error,
5451 >>(
5452 fidl::encoding::FlexibleResult::new(result),
5453 self.tx_id,
5454 0x3a58e00157e0825,
5455 fidl::encoding::DynamicFlags::FLEXIBLE,
5456 )
5457 }
5458}
5459
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionGetBufferCollectionIdResponder {
    // `ManuallyDrop` lets `drop_without_shutdown` release the handle without
    // running this type's `Drop` impl (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request; echoed back when the response is sent.
    tx_id: u32,
}
5466
/// Set the channel to be shut down (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5477
impl fidl::endpoints::Responder for BufferCollectionGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Borrows the control handle of the channel this response will go out on.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without sending a response and without shutting
    /// down the channel (bypasses the `Drop` impl's shutdown).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5492
5493impl BufferCollectionGetBufferCollectionIdResponder {
5494 /// Sends a response to the FIDL transaction.
5495 ///
5496 /// Sets the channel to shutdown if an error occurs.
5497 pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
5498 let _result = self.send_raw(payload);
5499 if _result.is_err() {
5500 self.control_handle.shutdown();
5501 }
5502 self.drop_without_shutdown();
5503 _result
5504 }
5505
5506 /// Similar to "send" but does not shutdown the channel if an error occurs.
5507 pub fn send_no_shutdown_on_err(
5508 self,
5509 mut payload: &NodeGetBufferCollectionIdResponse,
5510 ) -> Result<(), fidl::Error> {
5511 let _result = self.send_raw(payload);
5512 self.drop_without_shutdown();
5513 _result
5514 }
5515
5516 fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
5517 self.control_handle
5518 .inner
5519 .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
5520 fidl::encoding::Flexible::new(payload),
5521 self.tx_id,
5522 0x77d19a494b78ba8c,
5523 fidl::encoding::DynamicFlags::FLEXIBLE,
5524 )
5525 }
5526}
5527
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionWaitForAllBuffersAllocatedResponder {
    // `ManuallyDrop` lets `drop_without_shutdown` release the handle without
    // running this type's `Drop` impl (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request; echoed back when the response is sent.
    tx_id: u32,
}
5534
/// Set the channel to be shut down (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionWaitForAllBuffersAllocatedResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5545
impl fidl::endpoints::Responder for BufferCollectionWaitForAllBuffersAllocatedResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Borrows the control handle of the channel this response will go out on.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without sending a response and without shutting
    /// down the channel (bypasses the `Drop` impl's shutdown).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5560
impl BufferCollectionWaitForAllBuffersAllocatedResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            // A failed write leaves the channel unusable; shut it down.
            self.control_handle.shutdown();
        }
        // Consume self without running Drop (which would also shut down).
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes `result` as a flexible result and writes it to the channel
    /// under this responder's transaction id.
    fn send_raw(
        &self,
        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            BufferCollectionWaitForAllBuffersAllocatedResponse,
            Error,
        >>(
            // `as_mut().map_err(|e| *e)` encodes a mutable reference to the
            // success payload in place while copying the error out by value.
            fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
            self.tx_id,
            0x62300344b61404e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
5602
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionCheckAllBuffersAllocatedResponder {
    // `ManuallyDrop` lets `drop_without_shutdown` release the handle without
    // running this type's `Drop` impl (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request; echoed back when the response is sent.
    tx_id: u32,
}
5609
/// Set the channel to be shut down (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionCheckAllBuffersAllocatedResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5620
impl fidl::endpoints::Responder for BufferCollectionCheckAllBuffersAllocatedResponder {
    type ControlHandle = BufferCollectionControlHandle;

    /// Borrows the control handle of the channel this response will go out on.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without sending a response and without shutting
    /// down the channel (bypasses the `Drop` impl's shutdown).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5635
5636impl BufferCollectionCheckAllBuffersAllocatedResponder {
5637 /// Sends a response to the FIDL transaction.
5638 ///
5639 /// Sets the channel to shutdown if an error occurs.
5640 pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5641 let _result = self.send_raw(result);
5642 if _result.is_err() {
5643 self.control_handle.shutdown();
5644 }
5645 self.drop_without_shutdown();
5646 _result
5647 }
5648
5649 /// Similar to "send" but does not shutdown the channel if an error occurs.
5650 pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5651 let _result = self.send_raw(result);
5652 self.drop_without_shutdown();
5653 _result
5654 }
5655
5656 fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5657 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5658 fidl::encoding::EmptyStruct,
5659 Error,
5660 >>(
5661 fidl::encoding::FlexibleResult::new(result),
5662 self.tx_id,
5663 0x35a5fe77ce939c10,
5664 fidl::encoding::DynamicFlags::FLEXIBLE,
5665 )
5666 }
5667}
5668
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
/// Zero-sized marker type tying together the proxy, stream, and synchronous
/// proxy types for the `BufferCollectionToken` protocol (see the
/// `fidl::endpoints::ProtocolMarker` impl below).
pub struct BufferCollectionTokenMarker;
5671
impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenMarker {
    type Proxy = BufferCollectionTokenProxy;
    type RequestStream = BufferCollectionTokenRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionTokenSynchronousProxy;

    // "(anonymous)" because this protocol is not discoverable by name;
    // the string is used in debug/error output.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionToken";
}
5680
/// Client-side interface for the `BufferCollectionToken` protocol.
///
/// One-way methods return `Result<(), fidl::Error>` immediately; two-way
/// methods return an associated `…ResponseFut` future that resolves to the
/// server's response.
pub trait BufferCollectionTokenProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    type DuplicateSyncResponseFut: std::future::Future<
        Output = Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error>,
    > + Send;
    fn r#duplicate_sync(
        &self,
        payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> Self::DuplicateSyncResponseFut;
    fn r#duplicate(
        &self,
        payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_dispensable(&self) -> Result<(), fidl::Error>;
    fn r#create_buffer_collection_token_group(
        &self,
        payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error>;
}
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
/// Blocking (synchronous) client for the `BufferCollectionToken` protocol;
/// only available on Fuchsia targets.
pub struct BufferCollectionTokenSynchronousProxy {
    // Underlying synchronous FIDL client wrapping the channel.
    client: fidl::client::sync::Client,
}
5735
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenSynchronousProxy {
    type Proxy = BufferCollectionTokenProxy;
    type Protocol = BufferCollectionTokenMarker;

    /// Wraps a raw channel in a synchronous proxy.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    /// Unwraps the proxy, returning the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
5753
5754#[cfg(target_os = "fuchsia")]
5755impl BufferCollectionTokenSynchronousProxy {
5756 pub fn new(channel: fidl::Channel) -> Self {
5757 Self { client: fidl::client::sync::Client::new(channel) }
5758 }
5759
5760 pub fn into_channel(self) -> fidl::Channel {
5761 self.client.into_channel()
5762 }
5763
5764 /// Waits until an event arrives and returns it. It is safe for other
5765 /// threads to make concurrent requests while waiting for an event.
5766 pub fn wait_for_event(
5767 &self,
5768 deadline: zx::MonotonicInstant,
5769 ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
5770 BufferCollectionTokenEvent::decode(
5771 self.client.wait_for_event::<BufferCollectionTokenMarker>(deadline)?,
5772 )
5773 }
5774
5775 /// Ensure that previous messages have been received server side. This is
5776 /// particularly useful after previous messages that created new tokens,
5777 /// because a token must be known to the sysmem server before sending the
5778 /// token to another participant.
5779 ///
5780 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
5781 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
5782 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
5783 /// to mitigate the possibility of a hostile/fake
5784 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
5785 /// Another way is to pass the token to
5786 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
5787 /// the token as part of exchanging it for a
5788 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
5789 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
5790 /// of stalling.
5791 ///
5792 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
5793 /// and then starting and completing a `Sync`, it's then safe to send the
5794 /// `BufferCollectionToken` client ends to other participants knowing the
5795 /// server will recognize the tokens when they're sent by the other
5796 /// participants to sysmem in a
5797 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
5798 /// efficient way to create tokens while avoiding unnecessary round trips.
5799 ///
5800 /// Other options include waiting for each
5801 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
5802 /// individually (using separate call to `Sync` after each), or calling
5803 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
5804 /// converted to a `BufferCollection` via
5805 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
5806 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
5807 /// the sync step and can create multiple tokens at once.
5808 pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
5809 let _response = self.client.send_query::<
5810 fidl::encoding::EmptyPayload,
5811 fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
5812 BufferCollectionTokenMarker,
5813 >(
5814 (),
5815 0x11ac2555cf575b54,
5816 fidl::encoding::DynamicFlags::FLEXIBLE,
5817 ___deadline,
5818 )?
5819 .into_result::<BufferCollectionTokenMarker>("sync")?;
5820 Ok(_response)
5821 }
5822
5823 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
5824 ///
5825 /// Normally a participant will convert a `BufferCollectionToken` into a
5826 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
5827 /// `Release` via the token (and then close the channel immediately or
5828 /// shortly later in response to server closing the server end), which
5829 /// avoids causing buffer collection failure. Without a prior `Release`,
5830 /// closing the `BufferCollectionToken` client end will cause buffer
5831 /// collection failure.
5832 ///
5833 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
5834 ///
5835 /// By default the server handles unexpected closure of a
5836 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
5837 /// first) by failing the buffer collection. Partly this is to expedite
5838 /// closing VMO handles to reclaim memory when any participant fails. If a
5839 /// participant would like to cleanly close a `BufferCollection` without
5840 /// causing buffer collection failure, the participant can send `Release`
5841 /// before closing the `BufferCollection` client end. The `Release` can
5842 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
5843 /// buffer collection won't require constraints from this node in order to
5844 /// allocate. If after `SetConstraints`, the constraints are retained and
5845 /// aggregated, despite the lack of `BufferCollection` connection at the
5846 /// time of constraints aggregation.
5847 ///
5848 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
5849 ///
5850 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
5851 /// end (without `Release` first) will trigger failure of the buffer
5852 /// collection. To close a `BufferCollectionTokenGroup` channel without
5853 /// failing the buffer collection, ensure that AllChildrenPresent() has been
5854 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
5855 /// client end.
5856 ///
5857 /// If `Release` occurs before
5858 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
5859 /// buffer collection will fail (triggered by reception of `Release` without
5860 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
5861 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
5862 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
5863 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
5864 /// close requires `AllChildrenPresent` (if not already sent), then
5865 /// `Release`, then close client end.
5866 ///
5867 /// If `Release` occurs after `AllChildrenPresent`, the children and all
5868 /// their constraints remain intact (just as they would if the
5869 /// `BufferCollectionTokenGroup` channel had remained open), and the client
5870 /// end close doesn't trigger buffer collection failure.
5871 ///
5872 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
5873 ///
5874 /// For brevity, the per-channel-protocol paragraphs above ignore the
5875 /// separate failure domain created by
5876 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
5877 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
5878 /// unexpectedly closes (without `Release` first) and that client end is
5879 /// under a failure domain, instead of failing the whole buffer collection,
5880 /// the failure domain is failed, but the buffer collection itself is
5881 /// isolated from failure of the failure domain. Such failure domains can be
5882 /// nested, in which case only the inner-most failure domain in which the
5883 /// `Node` resides fails.
5884 pub fn r#release(&self) -> Result<(), fidl::Error> {
5885 self.client.send::<fidl::encoding::EmptyPayload>(
5886 (),
5887 0x6a5cae7d6d6e04c6,
5888 fidl::encoding::DynamicFlags::FLEXIBLE,
5889 )
5890 }
5891
5892 /// Set a name for VMOs in this buffer collection.
5893 ///
5894 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
5895 /// will be truncated to fit. The name of the vmo will be suffixed with the
5896 /// buffer index within the collection (if the suffix fits within
5897 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
5898 /// listed in the inspect data.
5899 ///
5900 /// The name only affects VMOs allocated after the name is set; this call
5901 /// does not rename existing VMOs. If multiple clients set different names
5902 /// then the larger priority value will win. Setting a new name with the
5903 /// same priority as a prior name doesn't change the name.
5904 ///
5905 /// All table fields are currently required.
5906 ///
5907 /// + request `priority` The name is only set if this is the first `SetName`
5908 /// or if `priority` is greater than any previous `priority` value in
5909 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
5910 /// + request `name` The name for VMOs created under this buffer collection.
5911 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
5912 self.client.send::<NodeSetNameRequest>(
5913 payload,
5914 0xb41f1624f48c1e9,
5915 fidl::encoding::DynamicFlags::FLEXIBLE,
5916 )
5917 }
5918
5919 /// Set information about the current client that can be used by sysmem to
5920 /// help diagnose leaking memory and allocation stalls waiting for a
5921 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
5922 ///
5923 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
5924 /// `Node`(s) derived from this `Node`, unless overriden by
5925 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
5926 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
5927 ///
5928 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
5929 /// `Allocator` is the most efficient way to ensure that all
5930 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
5931 /// set, and is also more efficient than separately sending the same debug
5932 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
5933 /// created [`fuchsia.sysmem2/Node`].
5934 ///
5935 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
5936 /// indicate which client is closing their channel first, leading to subtree
5937 /// failure (which can be normal if the purpose of the subtree is over, but
5938 /// if happening earlier than expected, the client-channel-specific name can
5939 /// help diagnose where the failure is first coming from, from sysmem's
5940 /// point of view).
5941 ///
5942 /// All table fields are currently required.
5943 ///
5944 /// + request `name` This can be an arbitrary string, but the current
5945 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
5946 /// + request `id` This can be an arbitrary id, but the current process ID
5947 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
5948 pub fn r#set_debug_client_info(
5949 &self,
5950 mut payload: &NodeSetDebugClientInfoRequest,
5951 ) -> Result<(), fidl::Error> {
5952 self.client.send::<NodeSetDebugClientInfoRequest>(
5953 payload,
5954 0x5cde8914608d99b1,
5955 fidl::encoding::DynamicFlags::FLEXIBLE,
5956 )
5957 }
5958
5959 /// Sysmem logs a warning if sysmem hasn't seen
5960 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
5961 /// within 5 seconds after creation of a new collection.
5962 ///
5963 /// Clients can call this method to change when the log is printed. If
5964 /// multiple client set the deadline, it's unspecified which deadline will
5965 /// take effect.
5966 ///
5967 /// In most cases the default works well.
5968 ///
5969 /// All table fields are currently required.
5970 ///
5971 /// + request `deadline` The time at which sysmem will start trying to log
5972 /// the warning, unless all constraints are with sysmem by then.
5973 pub fn r#set_debug_timeout_log_deadline(
5974 &self,
5975 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
5976 ) -> Result<(), fidl::Error> {
5977 self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
5978 payload,
5979 0x716b0af13d5c0806,
5980 fidl::encoding::DynamicFlags::FLEXIBLE,
5981 )
5982 }
5983
5984 /// This enables verbose logging for the buffer collection.
5985 ///
5986 /// Verbose logging includes constraints set via
5987 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
5988 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
5989 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
5990 /// the tree of `Node`(s).
5991 ///
5992 /// Normally sysmem prints only a single line complaint when aggregation
5993 /// fails, with just the specific detailed reason that aggregation failed,
5994 /// with little surrounding context. While this is often enough to diagnose
5995 /// a problem if only a small change was made and everything was working
5996 /// before the small change, it's often not particularly helpful for getting
5997 /// a new buffer collection to work for the first time. Especially with
5998 /// more complex trees of nodes, involving things like
5999 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
6000 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
6001 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
6002 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
6003 /// looks like and why it's failing a logical allocation, or why a tree or
6004 /// subtree is failing sooner than expected.
6005 ///
6006 /// The intent of the extra logging is to be acceptable from a performance
6007 /// point of view, under the assumption that verbose logging is only enabled
6008 /// on a low number of buffer collections. If we're not tracking down a bug,
6009 /// we shouldn't send this message.
6010 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
6011 self.client.send::<fidl::encoding::EmptyPayload>(
6012 (),
6013 0x5209c77415b4dfad,
6014 fidl::encoding::DynamicFlags::FLEXIBLE,
6015 )
6016 }
6017
6018 /// This gets a handle that can be used as a parameter to
6019 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
6020 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
6021 /// client obtained this handle from this `Node`.
6022 ///
6023 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
6024 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
6025 /// despite the two calls typically being on different channels.
6026 ///
6027 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
6028 ///
6029 /// All table fields are currently required.
6030 ///
6031 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
6032 /// different `Node` channel, to prove that the client obtained the handle
6033 /// from this `Node`.
6034 pub fn r#get_node_ref(
6035 &self,
6036 ___deadline: zx::MonotonicInstant,
6037 ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
6038 let _response = self.client.send_query::<
6039 fidl::encoding::EmptyPayload,
6040 fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
6041 BufferCollectionTokenMarker,
6042 >(
6043 (),
6044 0x5b3d0e51614df053,
6045 fidl::encoding::DynamicFlags::FLEXIBLE,
6046 ___deadline,
6047 )?
6048 .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
6049 Ok(_response)
6050 }
6051
6052 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
6053 /// rooted at a different child token of a common parent
6054 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
6055 /// passed-in `node_ref`.
6056 ///
6057 /// This call is for assisting with admission control de-duplication, and
6058 /// with debugging.
6059 ///
6060 /// The `node_ref` must be obtained using
6061 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
6062 ///
6063 /// The `node_ref` can be a duplicated handle; it's not necessary to call
6064 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
6065 ///
6066 /// If a calling token may not actually be a valid token at all due to a
6067 /// potentially hostile/untrusted provider of the token, call
6068 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6069 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
6070 /// never responds due to a calling token not being a real token (not really
6071 /// talking to sysmem). Another option is to call
6072 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
6073 /// which also validates the token along with converting it to a
6074 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
6075 ///
6076 /// All table fields are currently required.
6077 ///
6078 /// - response `is_alternate`
6079 /// - true: The first parent node in common between the calling node and
6080 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
6081 /// that the calling `Node` and the `node_ref` `Node` will not have both
6082 /// their constraints apply - rather sysmem will choose one or the other
6083 /// of the constraints - never both. This is because only one child of
6084 /// a `BufferCollectionTokenGroup` is selected during logical
6085 /// allocation, with only that one child's subtree contributing to
6086 /// constraints aggregation.
6087 /// - false: The first parent node in common between the calling `Node`
6088 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
6089 /// Currently, this means the first parent node in common is a
6090 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
6091 /// `Release`ed). This means that the calling `Node` and the `node_ref`
6092 /// `Node` may have both their constraints apply during constraints
6093 /// aggregation of the logical allocation, if both `Node`(s) are
6094 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
6095 /// this case, there is no `BufferCollectionTokenGroup` that will
6096 /// directly prevent the two `Node`(s) from both being selected and
6097 /// their constraints both aggregated, but even when false, one or both
6098 /// `Node`(s) may still be eliminated from consideration if one or both
6099 /// `Node`(s) has a direct or indirect parent
6100 /// `BufferCollectionTokenGroup` which selects a child subtree other
6101 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
6102 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
6103 /// associated with the same buffer collection as the calling `Node`.
6104 /// Another reason for this error is if the `node_ref` is an
6105 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
6106 /// a real `node_ref` obtained from `GetNodeRef`.
6107 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
6109 /// the needed rights expected on a real `node_ref`.
6110 /// * No other failing status codes are returned by this call. However,
6111 /// sysmem may add additional codes in future, so the client should have
6112 /// sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
        // Two-way flexible call that waits (up to `___deadline`) for the
        // server's reply. The hex literal is the method's FIDL ordinal
        // assigned by fidlgen (see the generated-file header).
        let _response = self.client.send_query::<
            NodeIsAlternateForRequest,
            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
            BufferCollectionTokenMarker,
        >(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
        // Identity map over the domain-level `Result` unwrapped from the
        // flexible envelope.
        Ok(_response.map(|x| x))
    }
6131
6132 /// Get the buffer collection ID. This ID is also available from
6133 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
6134 /// within the collection).
6135 ///
6136 /// This call is mainly useful in situations where we can't convey a
6137 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
6138 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
6139 /// handle, which can be joined back up with a `BufferCollection` client end
6140 /// that was created via a different path. Prefer to convey a
6141 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
6142 ///
6143 /// Trusting a `buffer_collection_id` value from a source other than sysmem
6144 /// is analogous to trusting a koid value from a source other than zircon.
6145 /// Both should be avoided unless really necessary, and both require
6146 /// caution. In some situations it may be reasonable to refer to a
6147 /// pre-established `BufferCollection` by `buffer_collection_id` via a
6148 /// protocol for efficiency reasons, but an incoming value purporting to be
6149 /// a `buffer_collection_id` is not sufficient alone to justify granting the
6150 /// sender of the `buffer_collection_id` any capability. The sender must
6151 /// first prove to a receiver that the sender has/had a VMO or has/had a
6152 /// `BufferCollectionToken` to the same collection by sending a handle that
6153 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
6154 /// `buffer_collection_id` value. The receiver should take care to avoid
6155 /// assuming that a sender had a `BufferCollectionToken` in cases where the
6156 /// sender has only proven that the sender had a VMO.
6157 ///
6158 /// - response `buffer_collection_id` This ID is unique per buffer
6159 /// collection per boot. Each buffer is uniquely identified by the
6160 /// `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
        // Two-way flexible call with an empty request payload; waits (up to
        // `___deadline`) for the response carrying the collection ID. The hex
        // literal is the method's FIDL ordinal assigned by fidlgen.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
            BufferCollectionTokenMarker,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
        Ok(_response)
    }
6178
6179 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
6180 /// created after this message to weak, which means that a client's `Node`
6181 /// client end (or a child created after this message) is not alone
6182 /// sufficient to keep allocated VMOs alive.
6183 ///
6184 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
6185 /// `close_weak_asap`.
6186 ///
6187 /// This message is only permitted before the `Node` becomes ready for
6188 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
6189 /// * `BufferCollectionToken`: any time
6190 /// * `BufferCollection`: before `SetConstraints`
6191 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
6192 ///
6193 /// Currently, no conversion from strong `Node` to weak `Node` after ready
6194 /// for allocation is provided, but a client can simulate that by creating
6195 /// an additional `Node` before allocation and setting that additional
6196 /// `Node` to weak, and then potentially at some point later sending
6197 /// `Release` and closing the client end of the client's strong `Node`, but
6198 /// keeping the client's weak `Node`.
6199 ///
6200 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
6201 /// collection failure (all `Node` client end(s) will see
6202 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
6203 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
6204 /// this situation until all `Node`(s) are ready for allocation. For initial
6205 /// allocation to succeed, at least one strong `Node` is required to exist
6206 /// at allocation time, but after that client receives VMO handles, that
6207 /// client can `BufferCollection.Release` and close the client end without
6208 /// causing this type of failure.
6209 ///
6210 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
6211 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
6212 /// separately as appropriate.
6213 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
6214 self.client.send::<fidl::encoding::EmptyPayload>(
6215 (),
6216 0x22dd3ea514eeffe1,
6217 fidl::encoding::DynamicFlags::FLEXIBLE,
6218 )
6219 }
6220
6221 /// This indicates to sysmem that the client is prepared to pay attention to
6222 /// `close_weak_asap`.
6223 ///
6224 /// If sent, this message must be before
6225 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
6226 ///
6227 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
6228 /// send this message before `WaitForAllBuffersAllocated`, or a parent
6229 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
6230 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
6231 /// trigger buffer collection failure.
6232 ///
6233 /// This message is necessary because weak sysmem VMOs have not always been
6234 /// a thing, so older clients are not aware of the need to pay attention to
6235 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
6236 /// sysmem weak VMO handles asap. By having this message and requiring
6237 /// participants to indicate their acceptance of this aspect of the overall
6238 /// protocol, we avoid situations where an older client is delivered a weak
6239 /// VMO without any way for sysmem to get that VMO to close quickly later
6240 /// (and on a per-buffer basis).
6241 ///
6242 /// A participant that doesn't handle `close_weak_asap` and also doesn't
6243 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
6244 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
6245 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
6246 /// same participant has a child/delegate which does retrieve VMOs, that
6247 /// child/delegate will need to send `SetWeakOk` before
6248 /// `WaitForAllBuffersAllocated`.
6249 ///
6250 /// + request `for_child_nodes_also` If present and true, this means direct
6251 /// child nodes of this node created after this message plus all
6252 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
6253 /// those nodes. Any child node of this node that was created before this
6254 /// message is not included. This setting is "sticky" in the sense that a
6255 /// subsequent `SetWeakOk` without this bool set to true does not reset
6256 /// the server-side bool. If this creates a problem for a participant, a
6257 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
6258 /// tokens instead, as appropriate. A participant should only set
6259 /// `for_child_nodes_also` true if the participant can really promise to
6260 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
6261 /// weak VMO handles held by participants holding the corresponding child
    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
6263 /// which are using sysmem(1) can be weak, despite the clients of those
6264 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
6265 /// direct way to find out about `close_weak_asap`. This only applies to
    ///   descendants of this `Node` which are using sysmem(1), not to this
6267 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
6268 /// token, which will fail allocation unless an ancestor of this `Node`
6269 /// specified `for_child_nodes_also` true.
6270 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
6271 self.client.send::<NodeSetWeakOkRequest>(
6272 &mut payload,
6273 0x38a44fc4d7724be9,
6274 fidl::encoding::DynamicFlags::FLEXIBLE,
6275 )
6276 }
6277
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
6280 /// reservation by a different `Node` via
6281 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
6282 ///
6283 /// The `Node` buffer counts may not be released until the entire tree of
6284 /// `Node`(s) is closed or failed, because
6285 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
6286 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
6287 /// `Node` buffer counts remain reserved until the orphaned node is later
6288 /// cleaned up.
6289 ///
6290 /// If the `Node` exceeds a fairly large number of attached eventpair server
6291 /// ends, a log message will indicate this and the `Node` (and the
6292 /// appropriate) sub-tree will fail.
6293 ///
6294 /// The `server_end` will remain open when
6295 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
6296 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
6297 /// [`fuchsia.sysmem2/BufferCollection`].
6298 ///
6299 /// This message can also be used with a
6300 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
6301 pub fn r#attach_node_tracking(
6302 &self,
6303 mut payload: NodeAttachNodeTrackingRequest,
6304 ) -> Result<(), fidl::Error> {
6305 self.client.send::<NodeAttachNodeTrackingRequest>(
6306 &mut payload,
6307 0x3f22f2a293d3cdac,
6308 fidl::encoding::DynamicFlags::FLEXIBLE,
6309 )
6310 }
6311
6312 /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
6313 /// one, referring to the same buffer collection.
6314 ///
6315 /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
6317 ///
6318 /// This method can be used to add more participants, by transferring the
6319 /// newly created tokens to additional participants.
6320 ///
6321 /// A new token will be returned for each entry in the
6322 /// `rights_attenuation_masks` array.
6323 ///
6324 /// If the called token may not actually be a valid token due to a
6325 /// potentially hostile/untrusted provider of the token, consider using
6326 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6327 /// instead of potentially getting stuck indefinitely if
6328 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
6329 /// due to the calling token not being a real token.
6330 ///
6331 /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
6332 /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
6333 /// method, because the sync step is included in this call, at the cost of a
6334 /// round trip during this call.
6335 ///
6336 /// All tokens must be turned in to sysmem via
6337 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6338 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6339 /// successfully allocate buffers (or to logically allocate buffers in the
6340 /// case of subtrees involving
6341 /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
6342 ///
6343 /// All table fields are currently required.
6344 ///
6345 /// + request `rights_attenuation_mask` In each entry of
6346 /// `rights_attenuation_masks`, rights bits that are zero will be absent
6347 /// in the buffer VMO rights obtainable via the corresponding returned
6348 /// token. This allows an initiator or intermediary participant to
6349 /// attenuate the rights available to a participant. This does not allow a
6350 /// participant to gain rights that the participant doesn't already have.
6351 /// The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
6352 /// attenuation should be applied.
6353 /// - response `tokens` The client ends of each newly created token.
    pub fn r#duplicate_sync(
        &self,
        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
        // Two-way flexible call that waits (up to `___deadline`) for the
        // response carrying the newly created token client ends. The hex
        // literal is the method's FIDL ordinal assigned by fidlgen.
        let _response = self.client.send_query::<
            BufferCollectionTokenDuplicateSyncRequest,
            fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
            BufferCollectionTokenMarker,
        >(
            payload,
            0x1c1af9919d1ca45c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
        Ok(_response)
    }
6372
6373 /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
6374 /// one, referring to the same buffer collection.
6375 ///
6376 /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
6378 ///
6379 /// This method can be used to add a participant, by transferring the newly
6380 /// created token to another participant.
6381 ///
6382 /// This one-way message can be used instead of the two-way
6383 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
6385 /// sysmem to respond to
6386 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
6387 /// client code isn't structured to make it easy to duplicate all the needed
6388 /// tokens at once.
6389 ///
6390 /// After sending one or more `Duplicate` messages, and before sending the
6391 /// newly created child tokens to other participants (or to other
6392 /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
6393 /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
6394 /// `Sync` call can be made on the token, or on the `BufferCollection`
6395 /// obtained by passing this token to `BindSharedCollection`. Either will
6396 /// ensure that the server knows about the tokens created via `Duplicate`
6397 /// before the other participant sends the token to the server via separate
6398 /// `Allocator` channel.
6399 ///
6400 /// All tokens must be turned in via
6401 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6402 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6403 /// successfully allocate buffers.
6404 ///
6405 /// All table fields are currently required.
6406 ///
6407 /// + request `rights_attenuation_mask` The rights bits that are zero in
6408 /// this mask will be absent in the buffer VMO rights obtainable via the
6409 /// client end of `token_request`. This allows an initiator or
6410 /// intermediary participant to attenuate the rights available to a
6411 /// delegate participant. This does not allow a participant to gain rights
6412 /// that the participant doesn't already have. The value
6413 /// `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
6414 /// should be applied.
6415 /// + These values for rights_attenuation_mask result in no attenuation:
6416 /// + `ZX_RIGHT_SAME_RIGHTS` (preferred)
6417 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is
6418 /// computed)
6419 /// + 0 (deprecated - do not use 0 - an ERROR will go to the log)
6420 /// + request `token_request` is the server end of a `BufferCollectionToken`
6421 /// channel. The client end of this channel acts as another participant in
6422 /// the shared buffer collection.
6423 pub fn r#duplicate(
6424 &self,
6425 mut payload: BufferCollectionTokenDuplicateRequest,
6426 ) -> Result<(), fidl::Error> {
6427 self.client.send::<BufferCollectionTokenDuplicateRequest>(
6428 &mut payload,
6429 0x73e78f92ee7fb887,
6430 fidl::encoding::DynamicFlags::FLEXIBLE,
6431 )
6432 }
6433
6434 /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
6435 ///
6436 /// When the `BufferCollectionToken` is converted to a
6437 /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
6438 /// the `BufferCollection` also.
6439 ///
6440 /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
6441 /// client end without having sent
6442 /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
6444 /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
6445 /// to the root `Node`, which fails the whole buffer collection. In
6446 /// contrast, a dispensable `Node` can fail after buffers are allocated
6447 /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
6449 ///
6450 /// The dispensable `Node` participates in constraints aggregation along
6451 /// with its parent before buffer allocation. If the dispensable `Node`
6452 /// fails before buffers are allocated, the failure propagates to the
6453 /// dispensable `Node`'s parent.
6454 ///
6455 /// After buffers are allocated, failure of the dispensable `Node` (or any
6456 /// child of the dispensable `Node`) does not propagate to the dispensable
6457 /// `Node`'s parent. Failure does propagate from a normal child of a
6458 /// dispensable `Node` to the dispensable `Node`. Failure of a child is
6459 /// blocked from reaching its parent if the child is attached using
6460 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
6461 /// dispensable and the failure occurred after allocation.
6462 ///
6463 /// A dispensable `Node` can be used in cases where a participant needs to
6464 /// provide constraints, but after buffers are allocated, the participant
6465 /// can fail without causing buffer collection failure from the parent
6466 /// `Node`'s point of view.
6467 ///
6468 /// In contrast, `BufferCollection.AttachToken` can be used to create a
6469 /// `BufferCollectionToken` which does not participate in constraints
6470 /// aggregation with its parent `Node`, and whose failure at any time does
6471 /// not propagate to its parent `Node`, and whose potential delay providing
6472 /// constraints does not prevent the parent `Node` from completing its
6473 /// buffer allocation.
6474 ///
6475 /// An initiator (creator of the root `Node` using
6476 /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
6477 /// scenarios choose to initially use a dispensable `Node` for a first
6478 /// instance of a participant, and then later if the first instance of that
    /// participant fails, a new second instance of that participant may be
    /// given a `BufferCollectionToken` created with `AttachToken`.
6480 /// a `BufferCollectionToken` created with `AttachToken`.
6481 ///
6482 /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
6483 /// shortly before sending the dispensable `BufferCollectionToken` to a
6484 /// delegate participant. Because `SetDispensable` prevents propagation of
6485 /// child `Node` failure to parent `Node`(s), if the client was relying on
6486 /// noticing child failure via failure of the parent `Node` retained by the
6487 /// client, the client may instead need to notice failure via other means.
6488 /// If other means aren't available/convenient, the client can instead
6489 /// retain the dispensable `Node` and create a child `Node` under that to
6490 /// send to the delegate participant, retaining this `Node` in order to
6491 /// notice failure of the subtree rooted at this `Node` via this `Node`'s
6492 /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
6493 /// (e.g. starting a new instance of the delegate participant and handing it
6494 /// a `BufferCollectionToken` created using
6495 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
6496 /// and clean up in a client-specific way).
6497 ///
6498 /// While it is possible (and potentially useful) to `SetDispensable` on a
6499 /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
6500 /// to later replace a failed dispensable `Node` that was a direct child of
6501 /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
6502 /// (since there's no `AttachToken` on a group). Instead, to enable
6503 /// `AttachToken` replacement in this case, create an additional
6504 /// non-dispensable token that's a direct child of the group and make the
6505 /// existing dispensable token a child of the additional token. This way,
6506 /// the additional token that is a direct child of the group has
6507 /// `BufferCollection.AttachToken` which can be used to replace the failed
6508 /// dispensable token.
6509 ///
6510 /// `SetDispensable` on an already-dispensable token is idempotent.
6511 pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
6512 self.client.send::<fidl::encoding::EmptyPayload>(
6513 (),
6514 0x228acf979254df8b,
6515 fidl::encoding::DynamicFlags::FLEXIBLE,
6516 )
6517 }
6518
6519 /// Create a logical OR among a set of tokens, called a
6520 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
6521 ///
6522 /// Most sysmem clients and many participants don't need to care about this
6523 /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
6524 /// a participant wants to attempt to include one set of delegate
6525 /// participants, but if constraints don't combine successfully that way,
6526 /// fall back to a different (possibly overlapping) set of delegate
6527 /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
6529 /// across all involved delegate participants). In such cases, a
6530 /// `BufferCollectionTokenGroup` is useful.
6531 ///
6532 /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
6533 /// child [`fuchsia.sysmem2/BufferCollectionToken`](s). The child tokens
6534 /// which are not selected during aggregation will fail (close), which a
6535 /// potential participant should notice when their `BufferCollection`
6536 /// channel client endpoint sees PEER_CLOSED, allowing the participant to
6537 /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
6539 /// allocate a logical buffer collection or later async failure of a buffer
6540 /// collection).
6541 ///
6542 /// See comments on protocol `BufferCollectionTokenGroup`.
6543 ///
6544 /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
6545 /// applied to the whole group can be achieved with a
6546 /// `BufferCollectionToken` for this purpose as a direct parent of the
6547 /// `BufferCollectionTokenGroup`.
6548 ///
6549 /// All table fields are currently required.
6550 ///
6551 /// + request `group_request` The server end of a
6552 /// `BufferCollectionTokenGroup` channel to be served by sysmem.
6553 pub fn r#create_buffer_collection_token_group(
6554 &self,
6555 mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
6556 ) -> Result<(), fidl::Error> {
6557 self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
6558 &mut payload,
6559 0x30f8d48e77bd36f2,
6560 fidl::encoding::DynamicFlags::FLEXIBLE,
6561 )
6562 }
6563}
6564
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionTokenSynchronousProxy> for zx::NullableHandle {
    // Consume the proxy and surface its underlying channel as a generic
    // (nullable) Zircon handle.
    fn from(value: BufferCollectionTokenSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}
6571
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionTokenSynchronousProxy {
    // Wrap a raw FIDL channel in a synchronous proxy for this protocol.
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}
6578
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionTokenSynchronousProxy {
    type Protocol = BufferCollectionTokenMarker;

    // Build a synchronous proxy from a typed client end by unwrapping its
    // underlying channel.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>) -> Self {
        Self::new(value.into_channel())
    }
}
6587
/// Client proxy for the `fuchsia.sysmem2/BufferCollectionToken` protocol.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenProxy {
    // Underlying FIDL client used to encode and send this protocol's messages.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
6592
impl fidl::endpoints::Proxy for BufferCollectionTokenProxy {
    type Protocol = BufferCollectionTokenMarker;

    // Construct a proxy from an async channel.
    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    // Try to recover the underlying channel; on failure the proxy is handed
    // back unchanged so the caller can keep using it.
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    // Borrow the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
6608
6609impl BufferCollectionTokenProxy {
6610 /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionToken.
6611 pub fn new(channel: ::fidl::AsyncChannel) -> Self {
6612 let protocol_name =
6613 <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
6614 Self { client: fidl::client::Client::new(channel, protocol_name) }
6615 }
6616
6617 /// Get a Stream of events from the remote end of the protocol.
6618 ///
6619 /// # Panics
6620 ///
6621 /// Panics if the event stream was already taken.
6622 pub fn take_event_stream(&self) -> BufferCollectionTokenEventStream {
6623 BufferCollectionTokenEventStream { event_receiver: self.client.take_event_receiver() }
6624 }
6625
6626 /// Ensure that previous messages have been received server side. This is
6627 /// particularly useful after previous messages that created new tokens,
6628 /// because a token must be known to the sysmem server before sending the
6629 /// token to another participant.
6630 ///
6631 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
6632 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
6633 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
6634 /// to mitigate the possibility of a hostile/fake
6635 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
6636 /// Another way is to pass the token to
6637 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
6638 /// the token as part of exchanging it for a
6639 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
6640 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
6641 /// of stalling.
6642 ///
6643 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
6644 /// and then starting and completing a `Sync`, it's then safe to send the
6645 /// `BufferCollectionToken` client ends to other participants knowing the
6646 /// server will recognize the tokens when they're sent by the other
6647 /// participants to sysmem in a
6648 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
6649 /// efficient way to create tokens while avoiding unnecessary round trips.
6650 ///
6651 /// Other options include waiting for each
6652 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
6653 /// individually (using separate call to `Sync` after each), or calling
6654 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
6655 /// converted to a `BufferCollection` via
6656 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
6657 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
6658 /// the sync step and can create multiple tokens at once.
    pub fn r#sync(
        &self,
    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
        // Delegate to the generated `BufferCollectionTokenProxyInterface`
        // implementation; the returned future completes with the reply.
        BufferCollectionTokenProxyInterface::r#sync(self)
    }
6664
6665 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
6666 ///
6667 /// Normally a participant will convert a `BufferCollectionToken` into a
6668 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
6669 /// `Release` via the token (and then close the channel immediately or
6670 /// shortly later in response to server closing the server end), which
6671 /// avoids causing buffer collection failure. Without a prior `Release`,
6672 /// closing the `BufferCollectionToken` client end will cause buffer
6673 /// collection failure.
6674 ///
6675 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
6676 ///
6677 /// By default the server handles unexpected closure of a
6678 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
6679 /// first) by failing the buffer collection. Partly this is to expedite
6680 /// closing VMO handles to reclaim memory when any participant fails. If a
6681 /// participant would like to cleanly close a `BufferCollection` without
6682 /// causing buffer collection failure, the participant can send `Release`
6683 /// before closing the `BufferCollection` client end. The `Release` can
6684 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
6685 /// buffer collection won't require constraints from this node in order to
6686 /// allocate. If after `SetConstraints`, the constraints are retained and
6687 /// aggregated, despite the lack of `BufferCollection` connection at the
6688 /// time of constraints aggregation.
6689 ///
6690 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
6691 ///
6692 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
6693 /// end (without `Release` first) will trigger failure of the buffer
6694 /// collection. To close a `BufferCollectionTokenGroup` channel without
6695 /// failing the buffer collection, ensure that AllChildrenPresent() has been
6696 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
6697 /// client end.
6698 ///
6699 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
6701 /// buffer collection will fail (triggered by reception of `Release` without
6702 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
6703 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
6704 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
6705 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
6706 /// close requires `AllChildrenPresent` (if not already sent), then
6707 /// `Release`, then close client end.
6708 ///
6709 /// If `Release` occurs after `AllChildrenPresent`, the children and all
6710 /// their constraints remain intact (just as they would if the
6711 /// `BufferCollectionTokenGroup` channel had remained open), and the client
6712 /// end close doesn't trigger buffer collection failure.
6713 ///
6714 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
6715 ///
6716 /// For brevity, the per-channel-protocol paragraphs above ignore the
6717 /// separate failure domain created by
6718 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
6719 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
6720 /// unexpectedly closes (without `Release` first) and that client end is
6721 /// under a failure domain, instead of failing the whole buffer collection,
6722 /// the failure domain is failed, but the buffer collection itself is
6723 /// isolated from failure of the failure domain. Such failure domains can be
6724 /// nested, in which case only the inner-most failure domain in which the
6725 /// `Node` resides fails.
6726 pub fn r#release(&self) -> Result<(), fidl::Error> {
6727 BufferCollectionTokenProxyInterface::r#release(self)
6728 }
6729
6730 /// Set a name for VMOs in this buffer collection.
6731 ///
6732 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
6733 /// will be truncated to fit. The name of the vmo will be suffixed with the
6734 /// buffer index within the collection (if the suffix fits within
6735 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
6736 /// listed in the inspect data.
6737 ///
6738 /// The name only affects VMOs allocated after the name is set; this call
6739 /// does not rename existing VMOs. If multiple clients set different names
6740 /// then the larger priority value will win. Setting a new name with the
6741 /// same priority as a prior name doesn't change the name.
6742 ///
6743 /// All table fields are currently required.
6744 ///
6745 /// + request `priority` The name is only set if this is the first `SetName`
6746 /// or if `priority` is greater than any previous `priority` value in
6747 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
6748 /// + request `name` The name for VMOs created under this buffer collection.
6749 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
6750 BufferCollectionTokenProxyInterface::r#set_name(self, payload)
6751 }
6752
    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
    /// indicate which client is closing their channel first, leading to subtree
    /// failure (which can be normal if the purpose of the subtree is over, but
    /// if happening earlier than expected, the client-channel-specific name can
    /// help diagnose where the failure is first coming from, from sysmem's
    /// point of view).
    ///
    /// All table fields are currently required.
    ///
    /// + request `name` This can be an arbitrary string, but the current
    /// process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    /// (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#set_debug_client_info(self, payload)
    }
6788
    /// Sysmem logs a warning if sysmem hasn't seen
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
    /// within 5 seconds after creation of a new collection.
    ///
    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
    /// take effect.
    ///
    /// In most cases the default works well.
    ///
    /// All table fields are currently required.
    ///
    /// + request `deadline` The time at which sysmem will start trying to log
    /// the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
    }
6809
6810 /// This enables verbose logging for the buffer collection.
6811 ///
6812 /// Verbose logging includes constraints set via
6813 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
6814 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
6815 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
6816 /// the tree of `Node`(s).
6817 ///
6818 /// Normally sysmem prints only a single line complaint when aggregation
6819 /// fails, with just the specific detailed reason that aggregation failed,
6820 /// with little surrounding context. While this is often enough to diagnose
6821 /// a problem if only a small change was made and everything was working
6822 /// before the small change, it's often not particularly helpful for getting
6823 /// a new buffer collection to work for the first time. Especially with
6824 /// more complex trees of nodes, involving things like
6825 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
6826 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
6827 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
6828 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
6829 /// looks like and why it's failing a logical allocation, or why a tree or
6830 /// subtree is failing sooner than expected.
6831 ///
6832 /// The intent of the extra logging is to be acceptable from a performance
6833 /// point of view, under the assumption that verbose logging is only enabled
6834 /// on a low number of buffer collections. If we're not tracking down a bug,
6835 /// we shouldn't send this message.
6836 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
6837 BufferCollectionTokenProxyInterface::r#set_verbose_logging(self)
6838 }
6839
6840 /// This gets a handle that can be used as a parameter to
6841 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
6842 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
6843 /// client obtained this handle from this `Node`.
6844 ///
6845 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
6846 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
6847 /// despite the two calls typically being on different channels.
6848 ///
6849 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
6850 ///
6851 /// All table fields are currently required.
6852 ///
6853 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
6854 /// different `Node` channel, to prove that the client obtained the handle
6855 /// from this `Node`.
6856 pub fn r#get_node_ref(
6857 &self,
6858 ) -> fidl::client::QueryResponseFut<
6859 NodeGetNodeRefResponse,
6860 fidl::encoding::DefaultFuchsiaResourceDialect,
6861 > {
6862 BufferCollectionTokenProxyInterface::r#get_node_ref(self)
6863 }
6864
    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
    /// rooted at a different child token of a common parent
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
    /// passed-in `node_ref`.
    ///
    /// This call is for assisting with admission control de-duplication, and
    /// with debugging.
    ///
    /// The `node_ref` must be obtained using
    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
    ///
    /// The `node_ref` can be a duplicated handle; it's not necessary to call
    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// If a calling token may not actually be a valid token at all due to a
    /// potentially hostile/untrusted provider of the token, call
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
    /// never responds due to a calling token not being a real token (not really
    /// talking to sysmem). Another option is to call
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
    /// which also validates the token along with converting it to a
    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
    ///
    /// All table fields are currently required.
    ///
    /// - response `is_alternate`
    /// - true: The first parent node in common between the calling node and
    /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
    /// that the calling `Node` and the `node_ref` `Node` will not have both
    /// their constraints apply - rather sysmem will choose one or the other
    /// of the constraints - never both. This is because only one child of
    /// a `BufferCollectionTokenGroup` is selected during logical
    /// allocation, with only that one child's subtree contributing to
    /// constraints aggregation.
    /// - false: The first parent node in common between the calling `Node`
    /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
    /// Currently, this means the first parent node in common is a
    /// `BufferCollectionToken` or `BufferCollection` (regardless of not
    /// `Release`ed). This means that the calling `Node` and the `node_ref`
    /// `Node` may have both their constraints apply during constraints
    /// aggregation of the logical allocation, if both `Node`(s) are
    /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
    /// this case, there is no `BufferCollectionTokenGroup` that will
    /// directly prevent the two `Node`(s) from both being selected and
    /// their constraints both aggregated, but even when false, one or both
    /// `Node`(s) may still be eliminated from consideration if one or both
    /// `Node`(s) has a direct or indirect parent
    /// `BufferCollectionTokenGroup` which selects a child subtree other
    /// than the subtree containing the calling `Node` or `node_ref` `Node`.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
    /// associated with the same buffer collection as the calling `Node`.
    /// Another reason for this error is if the `node_ref` is an
    /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
    /// a real `node_ref` obtained from `GetNodeRef`.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
    /// the needed rights expected on a real `node_ref`.
    /// * No other failing status codes are returned by this call. However,
    /// sysmem may add additional codes in future, so the client should have
    /// sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        BufferCollectionTokenProxyInterface::r#is_alternate_for(self, payload)
    }
6935
6936 /// Get the buffer collection ID. This ID is also available from
6937 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
6938 /// within the collection).
6939 ///
6940 /// This call is mainly useful in situations where we can't convey a
6941 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
6942 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
6943 /// handle, which can be joined back up with a `BufferCollection` client end
6944 /// that was created via a different path. Prefer to convey a
6945 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
6946 ///
6947 /// Trusting a `buffer_collection_id` value from a source other than sysmem
6948 /// is analogous to trusting a koid value from a source other than zircon.
6949 /// Both should be avoided unless really necessary, and both require
6950 /// caution. In some situations it may be reasonable to refer to a
6951 /// pre-established `BufferCollection` by `buffer_collection_id` via a
6952 /// protocol for efficiency reasons, but an incoming value purporting to be
6953 /// a `buffer_collection_id` is not sufficient alone to justify granting the
6954 /// sender of the `buffer_collection_id` any capability. The sender must
6955 /// first prove to a receiver that the sender has/had a VMO or has/had a
6956 /// `BufferCollectionToken` to the same collection by sending a handle that
6957 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
6958 /// `buffer_collection_id` value. The receiver should take care to avoid
6959 /// assuming that a sender had a `BufferCollectionToken` in cases where the
6960 /// sender has only proven that the sender had a VMO.
6961 ///
6962 /// - response `buffer_collection_id` This ID is unique per buffer
6963 /// collection per boot. Each buffer is uniquely identified by the
6964 /// `buffer_collection_id` and `buffer_index` together.
6965 pub fn r#get_buffer_collection_id(
6966 &self,
6967 ) -> fidl::client::QueryResponseFut<
6968 NodeGetBufferCollectionIdResponse,
6969 fidl::encoding::DefaultFuchsiaResourceDialect,
6970 > {
6971 BufferCollectionTokenProxyInterface::r#get_buffer_collection_id(self)
6972 }
6973
6974 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
6975 /// created after this message to weak, which means that a client's `Node`
6976 /// client end (or a child created after this message) is not alone
6977 /// sufficient to keep allocated VMOs alive.
6978 ///
6979 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
6980 /// `close_weak_asap`.
6981 ///
6982 /// This message is only permitted before the `Node` becomes ready for
6983 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
6984 /// * `BufferCollectionToken`: any time
6985 /// * `BufferCollection`: before `SetConstraints`
6986 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
6987 ///
6988 /// Currently, no conversion from strong `Node` to weak `Node` after ready
6989 /// for allocation is provided, but a client can simulate that by creating
6990 /// an additional `Node` before allocation and setting that additional
6991 /// `Node` to weak, and then potentially at some point later sending
6992 /// `Release` and closing the client end of the client's strong `Node`, but
6993 /// keeping the client's weak `Node`.
6994 ///
6995 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
6996 /// collection failure (all `Node` client end(s) will see
6997 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
6998 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
6999 /// this situation until all `Node`(s) are ready for allocation. For initial
7000 /// allocation to succeed, at least one strong `Node` is required to exist
7001 /// at allocation time, but after that client receives VMO handles, that
7002 /// client can `BufferCollection.Release` and close the client end without
7003 /// causing this type of failure.
7004 ///
7005 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
7006 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
7007 /// separately as appropriate.
7008 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
7009 BufferCollectionTokenProxyInterface::r#set_weak(self)
7010 }
7011
    /// This indicates to sysmem that the client is prepared to pay attention to
    /// `close_weak_asap`.
    ///
    /// If sent, this message must be before
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
    /// send this message before `WaitForAllBuffersAllocated`, or a parent
    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
    /// trigger buffer collection failure.
    ///
    /// This message is necessary because weak sysmem VMOs have not always been
    /// a thing, so older clients are not aware of the need to pay attention to
    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
    /// sysmem weak VMO handles asap. By having this message and requiring
    /// participants to indicate their acceptance of this aspect of the overall
    /// protocol, we avoid situations where an older client is delivered a weak
    /// VMO without any way for sysmem to get that VMO to close quickly later
    /// (and on a per-buffer basis).
    ///
    /// A participant that doesn't handle `close_weak_asap` and also doesn't
    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
    /// same participant has a child/delegate which does retrieve VMOs, that
    /// child/delegate will need to send `SetWeakOk` before
    /// `WaitForAllBuffersAllocated`.
    ///
    /// + request `for_child_nodes_also` If present and true, this means direct
    /// child nodes of this node created after this message plus all
    /// descendants of those nodes will behave as if `SetWeakOk` was sent on
    /// those nodes. Any child node of this node that was created before this
    /// message is not included. This setting is "sticky" in the sense that a
    /// subsequent `SetWeakOk` without this bool set to true does not reset
    /// the server-side bool. If this creates a problem for a participant, a
    /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
    /// tokens instead, as appropriate. A participant should only set
    /// `for_child_nodes_also` true if the participant can really promise to
    /// obey `close_weak_asap` both for its own weak VMO handles, and for all
    /// weak VMO handles held by participants holding the corresponding child
    /// `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
    /// which are using sysmem(1) can be weak, despite the clients of those
    /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
    /// direct way to find out about `close_weak_asap`. This only applies to
    /// descendants of this `Node` which are using sysmem(1), not to this
    /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
    /// token, which will fail allocation unless an ancestor of this `Node`
    /// specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#set_weak_ok(self, payload)
    }
7064
    /// The server_end will be closed after this `Node` and any child nodes have
    /// released their buffer counts, making those counts available for
    /// reservation by a different `Node` via
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
    ///
    /// The `Node` buffer counts may not be released until the entire tree of
    /// `Node`(s) is closed or failed, because
    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
    /// `Node` buffer counts remain reserved until the orphaned node is later
    /// cleaned up.
    ///
    /// If the `Node` exceeds a fairly large number of attached eventpair server
    /// ends, a log message will indicate this and the `Node` (and the
    /// appropriate) sub-tree will fail.
    ///
    /// The `server_end` will remain open when
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// This message can also be used with a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#attach_node_tracking(self, payload)
    }
7094
    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
    /// one, referring to the same buffer collection.
    ///
    /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
    ///
    /// This method can be used to add more participants, by transferring the
    /// newly created tokens to additional participants.
    ///
    /// A new token will be returned for each entry in the
    /// `rights_attenuation_masks` array.
    ///
    /// If the called token may not actually be a valid token due to a
    /// potentially hostile/untrusted provider of the token, consider using
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
    /// due to the calling token not being a real token.
    ///
    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
    /// method, because the sync step is included in this call, at the cost of a
    /// round trip during this call.
    ///
    /// All tokens must be turned in to sysmem via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
    /// successfully allocate buffers (or to logically allocate buffers in the
    /// case of subtrees involving
    /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
    ///
    /// All table fields are currently required.
    ///
    /// + request `rights_attenuation_mask` In each entry of
    /// `rights_attenuation_masks`, rights bits that are zero will be absent
    /// in the buffer VMO rights obtainable via the corresponding returned
    /// token. This allows an initiator or intermediary participant to
    /// attenuate the rights available to a participant. This does not allow a
    /// participant to gain rights that the participant doesn't already have.
    /// The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
    /// attenuation should be applied.
    /// - response `tokens` The client ends of each newly created token.
    pub fn r#duplicate_sync(
        &self,
        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> fidl::client::QueryResponseFut<
        BufferCollectionTokenDuplicateSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        BufferCollectionTokenProxyInterface::r#duplicate_sync(self, payload)
    }
7146
    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
    /// one, referring to the same buffer collection.
    ///
    /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
    ///
    /// This method can be used to add a participant, by transferring the newly
    /// created token to another participant.
    ///
    /// This one-way message can be used instead of the two-way
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
    /// sysmem to respond to
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
    /// client code isn't structured to make it easy to duplicate all the needed
    /// tokens at once.
    ///
    /// After sending one or more `Duplicate` messages, and before sending the
    /// newly created child tokens to other participants (or to other
    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
    /// `Sync` call can be made on the token, or on the `BufferCollection`
    /// obtained by passing this token to `BindSharedCollection`. Either will
    /// ensure that the server knows about the tokens created via `Duplicate`
    /// before the other participant sends the token to the server via separate
    /// `Allocator` channel.
    ///
    /// All tokens must be turned in via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
    /// successfully allocate buffers.
    ///
    /// All table fields are currently required.
    ///
    /// + request `rights_attenuation_mask` The rights bits that are zero in
    /// this mask will be absent in the buffer VMO rights obtainable via the
    /// client end of `token_request`. This allows an initiator or
    /// intermediary participant to attenuate the rights available to a
    /// delegate participant. This does not allow a participant to gain rights
    /// that the participant doesn't already have. The value
    /// `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
    /// should be applied.
    /// + These values for rights_attenuation_mask result in no attenuation:
    /// + `ZX_RIGHT_SAME_RIGHTS` (preferred)
    /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is
    /// computed)
    /// + 0 (deprecated - do not use 0 - an ERROR will go to the log)
    /// + request `token_request` is the server end of a `BufferCollectionToken`
    /// channel. The client end of this channel acts as another participant in
    /// the shared buffer collection.
    pub fn r#duplicate(
        &self,
        mut payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#duplicate(self, payload)
    }
7203
    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
    ///
    /// When the `BufferCollectionToken` is converted to a
    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
    /// the `BufferCollection` also.
    ///
    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
    /// client end without having sent
    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
    /// to the root `Node`, which fails the whole buffer collection. In
    /// contrast, a dispensable `Node` can fail after buffers are allocated
    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
    ///
    /// The dispensable `Node` participates in constraints aggregation along
    /// with its parent before buffer allocation. If the dispensable `Node`
    /// fails before buffers are allocated, the failure propagates to the
    /// dispensable `Node`'s parent.
    ///
    /// After buffers are allocated, failure of the dispensable `Node` (or any
    /// child of the dispensable `Node`) does not propagate to the dispensable
    /// `Node`'s parent. Failure does propagate from a normal child of a
    /// dispensable `Node` to the dispensable `Node`. Failure of a child is
    /// blocked from reaching its parent if the child is attached using
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
    /// dispensable and the failure occurred after allocation.
    ///
    /// A dispensable `Node` can be used in cases where a participant needs to
    /// provide constraints, but after buffers are allocated, the participant
    /// can fail without causing buffer collection failure from the parent
    /// `Node`'s point of view.
    ///
    /// In contrast, `BufferCollection.AttachToken` can be used to create a
    /// `BufferCollectionToken` which does not participate in constraints
    /// aggregation with its parent `Node`, and whose failure at any time does
    /// not propagate to its parent `Node`, and whose potential delay providing
    /// constraints does not prevent the parent `Node` from completing its
    /// buffer allocation.
    ///
    /// An initiator (creator of the root `Node` using
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
    /// scenarios choose to initially use a dispensable `Node` for a first
    /// instance of a participant, and then later if the first instance of that
    /// participant fails, a new second instance of that participant may be given
    /// a `BufferCollectionToken` created with `AttachToken`.
    ///
    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
    /// shortly before sending the dispensable `BufferCollectionToken` to a
    /// delegate participant. Because `SetDispensable` prevents propagation of
    /// child `Node` failure to parent `Node`(s), if the client was relying on
    /// noticing child failure via failure of the parent `Node` retained by the
    /// client, the client may instead need to notice failure via other means.
    /// If other means aren't available/convenient, the client can instead
    /// retain the dispensable `Node` and create a child `Node` under that to
    /// send to the delegate participant, retaining this `Node` in order to
    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
    /// (e.g. starting a new instance of the delegate participant and handing it
    /// a `BufferCollectionToken` created using
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
    /// and clean up in a client-specific way).
    ///
    /// While it is possible (and potentially useful) to `SetDispensable` on a
    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
    /// to later replace a failed dispensable `Node` that was a direct child of
    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
    /// (since there's no `AttachToken` on a group). Instead, to enable
    /// `AttachToken` replacement in this case, create an additional
    /// non-dispensable token that's a direct child of the group and make the
    /// existing dispensable token a child of the additional token. This way,
    /// the additional token that is a direct child of the group has
    /// `BufferCollection.AttachToken` which can be used to replace the failed
    /// dispensable token.
    ///
    /// `SetDispensable` on an already-dispensable token is idempotent.
    pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#set_dispensable(self)
    }
7284
    /// Create a logical OR among a set of tokens, called a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    ///
    /// Most sysmem clients and many participants don't need to care about this
    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
    /// a participant wants to attempt to include one set of delegate
    /// participants, but if constraints don't combine successfully that way,
    /// fall back to a different (possibly overlapping) set of delegate
    /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
    /// across all involved delegate participants). In such cases, a
    /// `BufferCollectionTokenGroup` is useful.
    ///
    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s). The child tokens
    /// which are not selected during aggregation will fail (close), which a
    /// potential participant should notice when their `BufferCollection`
    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
    /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
    /// allocate a logical buffer collection or later async failure of a buffer
    /// collection).
    ///
    /// See comments on protocol `BufferCollectionTokenGroup`.
    ///
    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
    /// applied to the whole group can be achieved with a
    /// `BufferCollectionToken` for this purpose as a direct parent of the
    /// `BufferCollectionTokenGroup`.
    ///
    /// All table fields are currently required.
    ///
    /// + request `group_request` The server end of a
    /// `BufferCollectionTokenGroup` channel to be served by sysmem.
    pub fn r#create_buffer_collection_token_group(
        &self,
        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#create_buffer_collection_token_group(self, payload)
    }
7325}
7326
// Wire-level implementation of the BufferCollectionToken protocol for the
// async proxy. One-way methods call `client.send`; two-way methods call
// `client.send_query_and_decode` with a local `_decode` function that checks
// the flexible-envelope result. Each method carries the protocol's 64-bit
// method ordinal; all messages use the FLEXIBLE dynamic flag.
impl BufferCollectionTokenProxyInterface for BufferCollectionTokenProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    // Two-way Sync: empty request, empty (flexible) response.
    fn r#sync(&self) -> Self::SyncResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way Release.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way SetName.
    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way SetDebugClientInfo.
    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way SetDebugTimeoutLogDeadline.
    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way SetVerboseLogging.
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way GetNodeRef: empty request, table response.
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way IsAlternateFor: decodes a flexible result (success or domain Error).
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way GetBufferCollectionId: empty request, table response.
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way SetWeak.
    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way SetWeakOk (payload by value: the table may carry handles).
    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way AttachNodeTracking (payload by value: the table carries handles).
    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type DuplicateSyncResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionTokenDuplicateSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way DuplicateSync: duplicates tokens and syncs in one round trip.
    fn r#duplicate_sync(
        &self,
        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> Self::DuplicateSyncResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x1c1af9919d1ca45c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            BufferCollectionTokenDuplicateSyncRequest,
            BufferCollectionTokenDuplicateSyncResponse,
        >(
            payload,
            0x1c1af9919d1ca45c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way Duplicate (payload by value: carries the new token's server end).
    fn r#duplicate(
        &self,
        mut payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenDuplicateRequest>(
            &mut payload,
            0x73e78f92ee7fb887,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way SetDispensable.
    fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x228acf979254df8b,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way CreateBufferCollectionTokenGroup (payload carries the group's
    // server end).
    fn r#create_buffer_collection_token_group(
        &self,
        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
            &mut payload,
            0x30f8d48e77bd36f2,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
7561
/// Stream of events arriving on a `BufferCollectionToken` client channel.
pub struct BufferCollectionTokenEventStream {
    // Yields raw message buffers; decoding into `BufferCollectionTokenEvent`
    // happens in this type's `futures::Stream` implementation.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
7565
// Explicitly `Unpin`: `poll_next` only accesses the inner receiver through an
// ordinary `&mut` reference, so the stream may be moved between polls.
impl std::marker::Unpin for BufferCollectionTokenEventStream {}
7567
impl futures::stream::FusedStream for BufferCollectionTokenEventStream {
    // Delegates termination tracking to the underlying event receiver.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
7573
7574impl futures::Stream for BufferCollectionTokenEventStream {
7575 type Item = Result<BufferCollectionTokenEvent, fidl::Error>;
7576
7577 fn poll_next(
7578 mut self: std::pin::Pin<&mut Self>,
7579 cx: &mut std::task::Context<'_>,
7580 ) -> std::task::Poll<Option<Self::Item>> {
7581 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
7582 &mut self.event_receiver,
7583 cx
7584 )?) {
7585 Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenEvent::decode(buf))),
7586 None => std::task::Poll::Ready(None),
7587 }
7588 }
7589}
7590
#[derive(Debug)]
pub enum BufferCollectionTokenEvent {
    /// An event whose ordinal these bindings do not recognize (for example,
    /// one added to the protocol after this code was generated). Only the
    /// ordinal is surfaced; the payload is discarded.
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
7599
7600impl BufferCollectionTokenEvent {
7601 /// Decodes a message buffer as a [`BufferCollectionTokenEvent`].
7602 fn decode(
7603 mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
7604 ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
7605 let (bytes, _handles) = buf.split_mut();
7606 let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
7607 debug_assert_eq!(tx_header.tx_id, 0);
7608 match tx_header.ordinal {
7609 _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
7610 Ok(BufferCollectionTokenEvent::_UnknownEvent { ordinal: tx_header.ordinal })
7611 }
7612 _ => Err(fidl::Error::UnknownOrdinal {
7613 ordinal: tx_header.ordinal,
7614 protocol_name:
7615 <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
7616 }),
7617 }
7618 }
7619}
7620
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionToken.
pub struct BufferCollectionTokenRequestStream {
    // Shared server-side channel state, also handed out to control handles.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Latched true once the channel shuts down or the peer closes; polling
    // after that point is a caller bug (see `poll_next`).
    is_terminated: bool,
}
7626
// Explicitly `Unpin`: `poll_next` only uses `&mut` field access, so the
// stream may be moved between polls.
impl std::marker::Unpin for BufferCollectionTokenRequestStream {}
7628
impl futures::stream::FusedStream for BufferCollectionTokenRequestStream {
    // Reports the flag latched by `poll_next` on shutdown/peer-closed.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
7634
impl fidl::endpoints::RequestStream for BufferCollectionTokenRequestStream {
    type Protocol = BufferCollectionTokenMarker;
    type ControlHandle = BufferCollectionTokenControlHandle;

    // Wraps a server-end channel in a fresh, not-yet-terminated stream.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    // Creates a control handle sharing this stream's channel state.
    fn control_handle(&self) -> Self::ControlHandle {
        BufferCollectionTokenControlHandle { inner: self.inner.clone() }
    }

    // Decomposes the stream into its shared state and termination flag
    // (inverse of `from_inner`).
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    // Reassembles a stream from previously extracted parts.
    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
7661
impl futures::Stream for BufferCollectionTokenRequestStream {
    type Item = Result<BufferCollectionTokenRequest, fidl::Error>;

    /// Reads the next message from the channel and dispatches it by method
    /// ordinal into a typed [`BufferCollectionTokenRequest`]. Returns `None`
    /// on shutdown or peer-closed; panics if polled again after completion.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled BufferCollectionTokenRequestStream after completion");
        }
        // Decode into thread-local buffers to avoid per-message allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure ends the stream cleanly rather than erroring.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch on the method ordinal; each arm validates the
                // one-way/two-way transaction id, decodes the payload, and
                // builds the typed request (two-way arms carry a responder).
                std::task::Poll::Ready(Some(match header.ordinal {
                    // Sync (two-way)
                    0x11ac2555cf575b54 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::Sync {
                            responder: BufferCollectionTokenSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Release (one-way)
                    0x6a5cae7d6d6e04c6 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::Release {
                            control_handle,
                        })
                    }
                    // SetName (one-way)
                    0xb41f1624f48c1e9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetName {payload: req,
                            control_handle,
                        })
                    }
                    // SetDebugClientInfo (one-way)
                    0x5cde8914608d99b1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetDebugClientInfo {payload: req,
                            control_handle,
                        })
                    }
                    // SetDebugTimeoutLogDeadline (one-way)
                    0x716b0af13d5c0806 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {payload: req,
                            control_handle,
                        })
                    }
                    // SetVerboseLogging (one-way)
                    0x5209c77415b4dfad => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetVerboseLogging {
                            control_handle,
                        })
                    }
                    // GetNodeRef (two-way)
                    0x5b3d0e51614df053 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::GetNodeRef {
                            responder: BufferCollectionTokenGetNodeRefResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // IsAlternateFor (two-way)
                    0x3a58e00157e0825 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::IsAlternateFor {payload: req,
                            responder: BufferCollectionTokenIsAlternateForResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetBufferCollectionId (two-way)
                    0x77d19a494b78ba8c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::GetBufferCollectionId {
                            responder: BufferCollectionTokenGetBufferCollectionIdResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SetWeak (one-way)
                    0x22dd3ea514eeffe1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetWeak {
                            control_handle,
                        })
                    }
                    // SetWeakOk (one-way)
                    0x38a44fc4d7724be9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetWeakOk {payload: req,
                            control_handle,
                        })
                    }
                    // AttachNodeTracking (one-way)
                    0x3f22f2a293d3cdac => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::AttachNodeTracking {payload: req,
                            control_handle,
                        })
                    }
                    // DuplicateSync (two-way)
                    0x1c1af9919d1ca45c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateSyncRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::DuplicateSync {payload: req,
                            responder: BufferCollectionTokenDuplicateSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Duplicate (one-way)
                    0x73e78f92ee7fb887 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::Duplicate {payload: req,
                            control_handle,
                        })
                    }
                    // SetDispensable (one-way)
                    0x228acf979254df8b => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetDispensable {
                            control_handle,
                        })
                    }
                    // CreateBufferCollectionTokenGroup (one-way)
                    0x30f8d48e77bd36f2 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenCreateBufferCollectionTokenGroupRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {payload: req,
                            control_handle,
                        })
                    }
                    // Unknown flexible one-way method: surface to the server.
                    _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                        Ok(BufferCollectionTokenRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with a framework
                    // error so the client's call completes, then surface it.
                    _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(BufferCollectionTokenRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name: <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
7918
7919/// A [`fuchsia.sysmem2/BufferCollectionToken`] is not a buffer collection, but
7920/// rather is a way to identify a specific potential shared buffer collection,
7921/// and a way to distribute that potential shared buffer collection to
7922/// additional participants prior to the buffer collection allocating any
7923/// buffers.
7924///
7925/// Epitaphs are not used in this protocol.
7926///
7927/// We use a channel for the `BufferCollectionToken` instead of a single
7928/// `eventpair` (pair) because this way we can detect error conditions like a
7929/// participant failing mid-create.
7930#[derive(Debug)]
7931pub enum BufferCollectionTokenRequest {
7932 /// Ensure that previous messages have been received server side. This is
7933 /// particularly useful after previous messages that created new tokens,
7934 /// because a token must be known to the sysmem server before sending the
7935 /// token to another participant.
7936 ///
7937 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
7938 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
7939 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
7940 /// to mitigate the possibility of a hostile/fake
7941 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
7942 /// Another way is to pass the token to
7943 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
7944 /// the token as part of exchanging it for a
7945 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
7946 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
7947 /// of stalling.
7948 ///
7949 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
7950 /// and then starting and completing a `Sync`, it's then safe to send the
7951 /// `BufferCollectionToken` client ends to other participants knowing the
7952 /// server will recognize the tokens when they're sent by the other
7953 /// participants to sysmem in a
7954 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
7955 /// efficient way to create tokens while avoiding unnecessary round trips.
7956 ///
7957 /// Other options include waiting for each
7958 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
7959 /// individually (using separate call to `Sync` after each), or calling
7960 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
7961 /// converted to a `BufferCollection` via
7962 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
7963 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
7964 /// the sync step and can create multiple tokens at once.
7965 Sync { responder: BufferCollectionTokenSyncResponder },
7966 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
7967 ///
7968 /// Normally a participant will convert a `BufferCollectionToken` into a
7969 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
7970 /// `Release` via the token (and then close the channel immediately or
7971 /// shortly later in response to server closing the server end), which
7972 /// avoids causing buffer collection failure. Without a prior `Release`,
7973 /// closing the `BufferCollectionToken` client end will cause buffer
7974 /// collection failure.
7975 ///
7976 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
7977 ///
7978 /// By default the server handles unexpected closure of a
7979 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
7980 /// first) by failing the buffer collection. Partly this is to expedite
7981 /// closing VMO handles to reclaim memory when any participant fails. If a
7982 /// participant would like to cleanly close a `BufferCollection` without
7983 /// causing buffer collection failure, the participant can send `Release`
7984 /// before closing the `BufferCollection` client end. The `Release` can
7985 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
7986 /// buffer collection won't require constraints from this node in order to
7987 /// allocate. If after `SetConstraints`, the constraints are retained and
7988 /// aggregated, despite the lack of `BufferCollection` connection at the
7989 /// time of constraints aggregation.
7990 ///
7991 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
7992 ///
7993 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
7994 /// end (without `Release` first) will trigger failure of the buffer
7995 /// collection. To close a `BufferCollectionTokenGroup` channel without
7996 /// failing the buffer collection, ensure that AllChildrenPresent() has been
7997 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
7998 /// client end.
7999 ///
8000 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
8002 /// buffer collection will fail (triggered by reception of `Release` without
8003 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
8004 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
8005 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
8006 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
8007 /// close requires `AllChildrenPresent` (if not already sent), then
8008 /// `Release`, then close client end.
8009 ///
8010 /// If `Release` occurs after `AllChildrenPresent`, the children and all
8011 /// their constraints remain intact (just as they would if the
8012 /// `BufferCollectionTokenGroup` channel had remained open), and the client
8013 /// end close doesn't trigger buffer collection failure.
8014 ///
8015 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
8016 ///
8017 /// For brevity, the per-channel-protocol paragraphs above ignore the
8018 /// separate failure domain created by
8019 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
8020 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
8021 /// unexpectedly closes (without `Release` first) and that client end is
8022 /// under a failure domain, instead of failing the whole buffer collection,
8023 /// the failure domain is failed, but the buffer collection itself is
8024 /// isolated from failure of the failure domain. Such failure domains can be
8025 /// nested, in which case only the inner-most failure domain in which the
8026 /// `Node` resides fails.
8027 Release { control_handle: BufferCollectionTokenControlHandle },
8028 /// Set a name for VMOs in this buffer collection.
8029 ///
8030 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
8031 /// will be truncated to fit. The name of the vmo will be suffixed with the
8032 /// buffer index within the collection (if the suffix fits within
8033 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
8034 /// listed in the inspect data.
8035 ///
8036 /// The name only affects VMOs allocated after the name is set; this call
8037 /// does not rename existing VMOs. If multiple clients set different names
8038 /// then the larger priority value will win. Setting a new name with the
8039 /// same priority as a prior name doesn't change the name.
8040 ///
8041 /// All table fields are currently required.
8042 ///
8043 /// + request `priority` The name is only set if this is the first `SetName`
8044 /// or if `priority` is greater than any previous `priority` value in
8045 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
8046 /// + request `name` The name for VMOs created under this buffer collection.
8047 SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenControlHandle },
8048 /// Set information about the current client that can be used by sysmem to
8049 /// help diagnose leaking memory and allocation stalls waiting for a
8050 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
8051 ///
8052 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
8053 /// `Node`(s) derived from this `Node`, unless overridden by
8054 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
8055 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
8056 ///
8057 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
8058 /// `Allocator` is the most efficient way to ensure that all
8059 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
8060 /// set, and is also more efficient than separately sending the same debug
8061 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
8062 /// created [`fuchsia.sysmem2/Node`].
8063 ///
8064 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
8065 /// indicate which client is closing their channel first, leading to subtree
8066 /// failure (which can be normal if the purpose of the subtree is over, but
8067 /// if happening earlier than expected, the client-channel-specific name can
8068 /// help diagnose where the failure is first coming from, from sysmem's
8069 /// point of view).
8070 ///
8071 /// All table fields are currently required.
8072 ///
8073 /// + request `name` This can be an arbitrary string, but the current
8074 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
8075 /// + request `id` This can be an arbitrary id, but the current process ID
8076 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
8077 SetDebugClientInfo {
8078 payload: NodeSetDebugClientInfoRequest,
8079 control_handle: BufferCollectionTokenControlHandle,
8080 },
8081 /// Sysmem logs a warning if sysmem hasn't seen
8082 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
8083 /// within 5 seconds after creation of a new collection.
8084 ///
8085 /// Clients can call this method to change when the log is printed. If
8086 /// multiple clients set the deadline, it's unspecified which deadline will
8087 /// take effect.
8088 ///
8089 /// In most cases the default works well.
8090 ///
8091 /// All table fields are currently required.
8092 ///
8093 /// + request `deadline` The time at which sysmem will start trying to log
8094 /// the warning, unless all constraints are with sysmem by then.
8095 SetDebugTimeoutLogDeadline {
8096 payload: NodeSetDebugTimeoutLogDeadlineRequest,
8097 control_handle: BufferCollectionTokenControlHandle,
8098 },
8099 /// This enables verbose logging for the buffer collection.
8100 ///
8101 /// Verbose logging includes constraints set via
8102 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
8103 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
8104 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
8105 /// the tree of `Node`(s).
8106 ///
8107 /// Normally sysmem prints only a single line complaint when aggregation
8108 /// fails, with just the specific detailed reason that aggregation failed,
8109 /// with little surrounding context. While this is often enough to diagnose
8110 /// a problem if only a small change was made and everything was working
8111 /// before the small change, it's often not particularly helpful for getting
8112 /// a new buffer collection to work for the first time. Especially with
8113 /// more complex trees of nodes, involving things like
8114 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
8115 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
8116 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
8117 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
8118 /// looks like and why it's failing a logical allocation, or why a tree or
8119 /// subtree is failing sooner than expected.
8120 ///
8121 /// The intent of the extra logging is to be acceptable from a performance
8122 /// point of view, under the assumption that verbose logging is only enabled
8123 /// on a low number of buffer collections. If we're not tracking down a bug,
8124 /// we shouldn't send this message.
8125 SetVerboseLogging { control_handle: BufferCollectionTokenControlHandle },
8126 /// This gets a handle that can be used as a parameter to
8127 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
8128 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
8129 /// client obtained this handle from this `Node`.
8130 ///
8131 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
8132 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
8133 /// despite the two calls typically being on different channels.
8134 ///
8135 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
8136 ///
8137 /// All table fields are currently required.
8138 ///
8139 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
8140 /// different `Node` channel, to prove that the client obtained the handle
8141 /// from this `Node`.
8142 GetNodeRef { responder: BufferCollectionTokenGetNodeRefResponder },
8143 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
8144 /// rooted at a different child token of a common parent
8145 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
8146 /// passed-in `node_ref`.
8147 ///
8148 /// This call is for assisting with admission control de-duplication, and
8149 /// with debugging.
8150 ///
8151 /// The `node_ref` must be obtained using
8152 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
8153 ///
8154 /// The `node_ref` can be a duplicated handle; it's not necessary to call
8155 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
8156 ///
8157 /// If a calling token may not actually be a valid token at all due to a
8158 /// potentially hostile/untrusted provider of the token, call
8159 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8160 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
8161 /// never responds due to a calling token not being a real token (not really
8162 /// talking to sysmem). Another option is to call
8163 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
8164 /// which also validates the token along with converting it to a
8165 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
8166 ///
8167 /// All table fields are currently required.
8168 ///
8169 /// - response `is_alternate`
8170 /// - true: The first parent node in common between the calling node and
8171 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
8172 /// that the calling `Node` and the `node_ref` `Node` will not have both
8173 /// their constraints apply - rather sysmem will choose one or the other
8174 /// of the constraints - never both. This is because only one child of
8175 /// a `BufferCollectionTokenGroup` is selected during logical
8176 /// allocation, with only that one child's subtree contributing to
8177 /// constraints aggregation.
8178 /// - false: The first parent node in common between the calling `Node`
8179 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
8180 /// Currently, this means the first parent node in common is a
8181 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
8182 /// `Release`ed). This means that the calling `Node` and the `node_ref`
8183 /// `Node` may have both their constraints apply during constraints
8184 /// aggregation of the logical allocation, if both `Node`(s) are
8185 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
8186 /// this case, there is no `BufferCollectionTokenGroup` that will
8187 /// directly prevent the two `Node`(s) from both being selected and
8188 /// their constraints both aggregated, but even when false, one or both
8189 /// `Node`(s) may still be eliminated from consideration if one or both
8190 /// `Node`(s) has a direct or indirect parent
8191 /// `BufferCollectionTokenGroup` which selects a child subtree other
8192 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
8193 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
8194 /// associated with the same buffer collection as the calling `Node`.
8195 /// Another reason for this error is if the `node_ref` is an
8196 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
8197 /// a real `node_ref` obtained from `GetNodeRef`.
8198 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
8199 ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
8200 /// the needed rights expected on a real `node_ref`.
8201 /// * No other failing status codes are returned by this call. However,
8202 /// sysmem may add additional codes in future, so the client should have
8203 /// sensible default handling for any failing status code.
8204 IsAlternateFor {
8205 payload: NodeIsAlternateForRequest,
8206 responder: BufferCollectionTokenIsAlternateForResponder,
8207 },
8208 /// Get the buffer collection ID. This ID is also available from
8209 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
8210 /// within the collection).
8211 ///
8212 /// This call is mainly useful in situations where we can't convey a
8213 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
8214 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
8215 /// handle, which can be joined back up with a `BufferCollection` client end
8216 /// that was created via a different path. Prefer to convey a
8217 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
8218 ///
8219 /// Trusting a `buffer_collection_id` value from a source other than sysmem
8220 /// is analogous to trusting a koid value from a source other than zircon.
8221 /// Both should be avoided unless really necessary, and both require
8222 /// caution. In some situations it may be reasonable to refer to a
8223 /// pre-established `BufferCollection` by `buffer_collection_id` via a
8224 /// protocol for efficiency reasons, but an incoming value purporting to be
8225 /// a `buffer_collection_id` is not sufficient alone to justify granting the
8226 /// sender of the `buffer_collection_id` any capability. The sender must
8227 /// first prove to a receiver that the sender has/had a VMO or has/had a
8228 /// `BufferCollectionToken` to the same collection by sending a handle that
8229 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
8230 /// `buffer_collection_id` value. The receiver should take care to avoid
8231 /// assuming that a sender had a `BufferCollectionToken` in cases where the
8232 /// sender has only proven that the sender had a VMO.
8233 ///
8234 /// - response `buffer_collection_id` This ID is unique per buffer
8235 /// collection per boot. Each buffer is uniquely identified by the
8236 /// `buffer_collection_id` and `buffer_index` together.
8237 GetBufferCollectionId { responder: BufferCollectionTokenGetBufferCollectionIdResponder },
8238 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
8239 /// created after this message to weak, which means that a client's `Node`
8240 /// client end (or a child created after this message) is not alone
8241 /// sufficient to keep allocated VMOs alive.
8242 ///
8243 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
8244 /// `close_weak_asap`.
8245 ///
8246 /// This message is only permitted before the `Node` becomes ready for
8247 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
8248 /// * `BufferCollectionToken`: any time
8249 /// * `BufferCollection`: before `SetConstraints`
8250 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
8251 ///
8252 /// Currently, no conversion from strong `Node` to weak `Node` after ready
8253 /// for allocation is provided, but a client can simulate that by creating
8254 /// an additional `Node` before allocation and setting that additional
8255 /// `Node` to weak, and then potentially at some point later sending
8256 /// `Release` and closing the client end of the client's strong `Node`, but
8257 /// keeping the client's weak `Node`.
8258 ///
8259 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
8260 /// collection failure (all `Node` client end(s) will see
8261 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
8262 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
8263 /// this situation until all `Node`(s) are ready for allocation. For initial
8264 /// allocation to succeed, at least one strong `Node` is required to exist
8265 /// at allocation time, but after that client receives VMO handles, that
8266 /// client can `BufferCollection.Release` and close the client end without
8267 /// causing this type of failure.
8268 ///
8269 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
8270 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
8271 /// separately as appropriate.
8272 SetWeak { control_handle: BufferCollectionTokenControlHandle },
8273 /// This indicates to sysmem that the client is prepared to pay attention to
8274 /// `close_weak_asap`.
8275 ///
8276 /// If sent, this message must be before
8277 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
8278 ///
8279 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
8280 /// send this message before `WaitForAllBuffersAllocated`, or a parent
8281 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
8282 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
8283 /// trigger buffer collection failure.
8284 ///
8285 /// This message is necessary because weak sysmem VMOs have not always been
8286 /// a thing, so older clients are not aware of the need to pay attention to
8287 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
8288 /// sysmem weak VMO handles asap. By having this message and requiring
8289 /// participants to indicate their acceptance of this aspect of the overall
8290 /// protocol, we avoid situations where an older client is delivered a weak
8291 /// VMO without any way for sysmem to get that VMO to close quickly later
8292 /// (and on a per-buffer basis).
8293 ///
8294 /// A participant that doesn't handle `close_weak_asap` and also doesn't
8295 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
8296 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
8297 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
8298 /// same participant has a child/delegate which does retrieve VMOs, that
8299 /// child/delegate will need to send `SetWeakOk` before
8300 /// `WaitForAllBuffersAllocated`.
8301 ///
8302 /// + request `for_child_nodes_also` If present and true, this means direct
8303 /// child nodes of this node created after this message plus all
8304 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
8305 /// those nodes. Any child node of this node that was created before this
8306 /// message is not included. This setting is "sticky" in the sense that a
8307 /// subsequent `SetWeakOk` without this bool set to true does not reset
8308 /// the server-side bool. If this creates a problem for a participant, a
8309 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
8310 /// tokens instead, as appropriate. A participant should only set
8311 /// `for_child_nodes_also` true if the participant can really promise to
8312 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
8313 /// weak VMO handles held by participants holding the corresponding child
8314 ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
8315 /// which are using sysmem(1) can be weak, despite the clients of those
8316 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
8317 /// direct way to find out about `close_weak_asap`. This only applies to
8318 ///   descendants of this `Node` which are using sysmem(1), not to this
8319 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
8320 /// token, which will fail allocation unless an ancestor of this `Node`
8321 /// specified `for_child_nodes_also` true.
8322 SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionTokenControlHandle },
8323 /// The server_end will be closed after this `Node` and any child nodes
8324 /// have released their buffer counts, making those counts available for
8325 /// reservation by a different `Node` via
8326 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
8327 ///
8328 /// The `Node` buffer counts may not be released until the entire tree of
8329 /// `Node`(s) is closed or failed, because
8330 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
8331 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
8332 /// `Node` buffer counts remain reserved until the orphaned node is later
8333 /// cleaned up.
8334 ///
8335 /// If the `Node` exceeds a fairly large number of attached eventpair server
8336 /// ends, a log message will indicate this and the `Node` (and the
8337 /// appropriate) sub-tree will fail.
8338 ///
8339 /// The `server_end` will remain open when
8340 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
8341 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
8342 /// [`fuchsia.sysmem2/BufferCollection`].
8343 ///
8344 /// This message can also be used with a
8345 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
8346 AttachNodeTracking {
8347 payload: NodeAttachNodeTrackingRequest,
8348 control_handle: BufferCollectionTokenControlHandle,
8349 },
8350 /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
8351 /// one, referring to the same buffer collection.
8352 ///
8353 /// The created tokens are children of this token in the
8354 /// [`fuchsia.sysmem2/Node`] hierarchy.
8355 ///
8356 /// This method can be used to add more participants, by transferring the
8357 /// newly created tokens to additional participants.
8358 ///
8359 /// A new token will be returned for each entry in the
8360 /// `rights_attenuation_masks` array.
8361 ///
8362 /// If the called token may not actually be a valid token due to a
8363 /// potentially hostile/untrusted provider of the token, consider using
8364 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8365 /// instead of potentially getting stuck indefinitely if
8366 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
8367 /// due to the calling token not being a real token.
8368 ///
8369 /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
8370 /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
8371 /// method, because the sync step is included in this call, at the cost of a
8372 /// round trip during this call.
8373 ///
8374 /// All tokens must be turned in to sysmem via
8375 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8376 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8377 /// successfully allocate buffers (or to logically allocate buffers in the
8378 /// case of subtrees involving
8379 /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
8380 ///
8381 /// All table fields are currently required.
8382 ///
8383 /// + request `rights_attenuation_mask` In each entry of
8384 /// `rights_attenuation_masks`, rights bits that are zero will be absent
8385 /// in the buffer VMO rights obtainable via the corresponding returned
8386 /// token. This allows an initiator or intermediary participant to
8387 /// attenuate the rights available to a participant. This does not allow a
8388 /// participant to gain rights that the participant doesn't already have.
8389 /// The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
8390 /// attenuation should be applied.
8391 /// - response `tokens` The client ends of each newly created token.
8392 DuplicateSync {
8393 payload: BufferCollectionTokenDuplicateSyncRequest,
8394 responder: BufferCollectionTokenDuplicateSyncResponder,
8395 },
8396 /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
8397 /// one, referring to the same buffer collection.
8398 ///
8399 /// The created token is a child of this token in the
8400 /// [`fuchsia.sysmem2/Node`] hierarchy.
8401 ///
8402 /// This method can be used to add a participant, by transferring the newly
8403 /// created token to another participant.
8404 ///
8405 /// This one-way message can be used instead of the two-way
8406 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
8407 /// performance sensitive cases where it would be undesirable to wait for
8408 /// sysmem to respond to
8409 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
8410 /// client code isn't structured to make it easy to duplicate all the needed
8411 /// tokens at once.
8412 ///
8413 /// After sending one or more `Duplicate` messages, and before sending the
8414 /// newly created child tokens to other participants (or to other
8415 /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
8416 /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
8417 /// `Sync` call can be made on the token, or on the `BufferCollection`
8418 /// obtained by passing this token to `BindSharedCollection`. Either will
8419 /// ensure that the server knows about the tokens created via `Duplicate`
8420 /// before the other participant sends the token to the server via separate
8421 /// `Allocator` channel.
8422 ///
8423 /// All tokens must be turned in via
8424 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8425 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8426 /// successfully allocate buffers.
8427 ///
8428 /// All table fields are currently required.
8429 ///
8430 /// + request `rights_attenuation_mask` The rights bits that are zero in
8431 /// this mask will be absent in the buffer VMO rights obtainable via the
8432 /// client end of `token_request`. This allows an initiator or
8433 /// intermediary participant to attenuate the rights available to a
8434 /// delegate participant. This does not allow a participant to gain rights
8435 /// that the participant doesn't already have. The value
8436 /// `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
8437 /// should be applied.
8438 /// + These values for rights_attenuation_mask result in no attenuation:
8439 /// + `ZX_RIGHT_SAME_RIGHTS` (preferred)
8440 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is
8441 /// computed)
8442 /// + 0 (deprecated - do not use 0 - an ERROR will go to the log)
8443 /// + request `token_request` is the server end of a `BufferCollectionToken`
8444 /// channel. The client end of this channel acts as another participant in
8445 /// the shared buffer collection.
8446 Duplicate {
8447 payload: BufferCollectionTokenDuplicateRequest,
8448 control_handle: BufferCollectionTokenControlHandle,
8449 },
8450 /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
8451 ///
8452 /// When the `BufferCollectionToken` is converted to a
8453 /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
8454 /// the `BufferCollection` also.
8455 ///
8456 /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
8457 /// client end without having sent
8458 /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
8459 /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
8460 /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
8461 /// to the root `Node`, which fails the whole buffer collection. In
8462 /// contrast, a dispensable `Node` can fail after buffers are allocated
8463 /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
8464 /// hierarchy.
8465 ///
8466 /// The dispensable `Node` participates in constraints aggregation along
8467 /// with its parent before buffer allocation. If the dispensable `Node`
8468 /// fails before buffers are allocated, the failure propagates to the
8469 /// dispensable `Node`'s parent.
8470 ///
8471 /// After buffers are allocated, failure of the dispensable `Node` (or any
8472 /// child of the dispensable `Node`) does not propagate to the dispensable
8473 /// `Node`'s parent. Failure does propagate from a normal child of a
8474 /// dispensable `Node` to the dispensable `Node`. Failure of a child is
8475 /// blocked from reaching its parent if the child is attached using
8476 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
8477 /// dispensable and the failure occurred after allocation.
8478 ///
8479 /// A dispensable `Node` can be used in cases where a participant needs to
8480 /// provide constraints, but after buffers are allocated, the participant
8481 /// can fail without causing buffer collection failure from the parent
8482 /// `Node`'s point of view.
8483 ///
8484 /// In contrast, `BufferCollection.AttachToken` can be used to create a
8485 /// `BufferCollectionToken` which does not participate in constraints
8486 /// aggregation with its parent `Node`, and whose failure at any time does
8487 /// not propagate to its parent `Node`, and whose potential delay providing
8488 /// constraints does not prevent the parent `Node` from completing its
8489 /// buffer allocation.
8490 ///
8491 /// An initiator (creator of the root `Node` using
8492 /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
8493 /// scenarios choose to initially use a dispensable `Node` for a first
8494 /// instance of a participant, and then later if the first instance of that
8495 /// participant fails, a new second instance of that participant may be given
8496 /// a `BufferCollectionToken` created with `AttachToken`.
8497 ///
8498 /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
8499 /// shortly before sending the dispensable `BufferCollectionToken` to a
8500 /// delegate participant. Because `SetDispensable` prevents propagation of
8501 /// child `Node` failure to parent `Node`(s), if the client was relying on
8502 /// noticing child failure via failure of the parent `Node` retained by the
8503 /// client, the client may instead need to notice failure via other means.
8504 /// If other means aren't available/convenient, the client can instead
8505 /// retain the dispensable `Node` and create a child `Node` under that to
8506 /// send to the delegate participant, retaining this `Node` in order to
8507 /// notice failure of the subtree rooted at this `Node` via this `Node`'s
8508 /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
8509 /// (e.g. starting a new instance of the delegate participant and handing it
8510 /// a `BufferCollectionToken` created using
8511 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
8512 /// and clean up in a client-specific way).
8513 ///
8514 /// While it is possible (and potentially useful) to `SetDispensable` on a
8515 /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
8516 /// to later replace a failed dispensable `Node` that was a direct child of
8517 /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
8518 /// (since there's no `AttachToken` on a group). Instead, to enable
8519 /// `AttachToken` replacement in this case, create an additional
8520 /// non-dispensable token that's a direct child of the group and make the
8521 /// existing dispensable token a child of the additional token. This way,
8522 /// the additional token that is a direct child of the group has
8523 /// `BufferCollection.AttachToken` which can be used to replace the failed
8524 /// dispensable token.
8525 ///
8526 /// `SetDispensable` on an already-dispensable token is idempotent.
8527 SetDispensable { control_handle: BufferCollectionTokenControlHandle },
8528 /// Create a logical OR among a set of tokens, called a
8529 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
8530 ///
8531 /// Most sysmem clients and many participants don't need to care about this
8532 /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
8533 /// a participant wants to attempt to include one set of delegate
8534 /// participants, but if constraints don't combine successfully that way,
8535 /// fall back to a different (possibly overlapping) set of delegate
8536 /// participants, and/or fall back to a less demanding strategy (in terms of
8537 /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
8538 /// across all involved delegate participants). In such cases, a
8539 /// `BufferCollectionTokenGroup` is useful.
8540 ///
8541 /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
8542 /// child [`fuchsia.sysmem2/BufferCollectionToken`](s). The child tokens
8543 /// which are not selected during aggregation will fail (close), which a
8544 /// potential participant should notice when their `BufferCollection`
8545 /// channel client endpoint sees PEER_CLOSED, allowing the participant to
8546 /// clean up the speculative usage that didn't end up happening (this is
8547 /// similar to a normal `BufferCollection` server end closing on failure to
8548 /// allocate a logical buffer collection or later async failure of a buffer
8549 /// collection).
8550 ///
8551 /// See comments on protocol `BufferCollectionTokenGroup`.
8552 ///
8553 /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
8554 /// applied to the whole group can be achieved with a
8555 /// `BufferCollectionToken` for this purpose as a direct parent of the
8556 /// `BufferCollectionTokenGroup`.
8557 ///
8558 /// All table fields are currently required.
8559 ///
8560 /// + request `group_request` The server end of a
8561 /// `BufferCollectionTokenGroup` channel to be served by sysmem.
8562 CreateBufferCollectionTokenGroup {
8563 payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8564 control_handle: BufferCollectionTokenControlHandle,
8565 },
8566 /// An interaction was received which does not match any known method.
8567 #[non_exhaustive]
8568 _UnknownMethod {
8569 /// Ordinal of the method that was called.
8570 ordinal: u64,
8571 control_handle: BufferCollectionTokenControlHandle,
8572 method_type: fidl::MethodType,
8573 },
8574}
8575
8576impl BufferCollectionTokenRequest {
8577 #[allow(irrefutable_let_patterns)]
8578 pub fn into_sync(self) -> Option<(BufferCollectionTokenSyncResponder)> {
8579 if let BufferCollectionTokenRequest::Sync { responder } = self {
8580 Some((responder))
8581 } else {
8582 None
8583 }
8584 }
8585
8586 #[allow(irrefutable_let_patterns)]
8587 pub fn into_release(self) -> Option<(BufferCollectionTokenControlHandle)> {
8588 if let BufferCollectionTokenRequest::Release { control_handle } = self {
8589 Some((control_handle))
8590 } else {
8591 None
8592 }
8593 }
8594
8595 #[allow(irrefutable_let_patterns)]
8596 pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionTokenControlHandle)> {
8597 if let BufferCollectionTokenRequest::SetName { payload, control_handle } = self {
8598 Some((payload, control_handle))
8599 } else {
8600 None
8601 }
8602 }
8603
8604 #[allow(irrefutable_let_patterns)]
8605 pub fn into_set_debug_client_info(
8606 self,
8607 ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenControlHandle)> {
8608 if let BufferCollectionTokenRequest::SetDebugClientInfo { payload, control_handle } = self {
8609 Some((payload, control_handle))
8610 } else {
8611 None
8612 }
8613 }
8614
8615 #[allow(irrefutable_let_patterns)]
8616 pub fn into_set_debug_timeout_log_deadline(
8617 self,
8618 ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenControlHandle)> {
8619 if let BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {
8620 payload,
8621 control_handle,
8622 } = self
8623 {
8624 Some((payload, control_handle))
8625 } else {
8626 None
8627 }
8628 }
8629
8630 #[allow(irrefutable_let_patterns)]
8631 pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenControlHandle)> {
8632 if let BufferCollectionTokenRequest::SetVerboseLogging { control_handle } = self {
8633 Some((control_handle))
8634 } else {
8635 None
8636 }
8637 }
8638
8639 #[allow(irrefutable_let_patterns)]
8640 pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGetNodeRefResponder)> {
8641 if let BufferCollectionTokenRequest::GetNodeRef { responder } = self {
8642 Some((responder))
8643 } else {
8644 None
8645 }
8646 }
8647
8648 #[allow(irrefutable_let_patterns)]
8649 pub fn into_is_alternate_for(
8650 self,
8651 ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenIsAlternateForResponder)> {
8652 if let BufferCollectionTokenRequest::IsAlternateFor { payload, responder } = self {
8653 Some((payload, responder))
8654 } else {
8655 None
8656 }
8657 }
8658
8659 #[allow(irrefutable_let_patterns)]
8660 pub fn into_get_buffer_collection_id(
8661 self,
8662 ) -> Option<(BufferCollectionTokenGetBufferCollectionIdResponder)> {
8663 if let BufferCollectionTokenRequest::GetBufferCollectionId { responder } = self {
8664 Some((responder))
8665 } else {
8666 None
8667 }
8668 }
8669
8670 #[allow(irrefutable_let_patterns)]
8671 pub fn into_set_weak(self) -> Option<(BufferCollectionTokenControlHandle)> {
8672 if let BufferCollectionTokenRequest::SetWeak { control_handle } = self {
8673 Some((control_handle))
8674 } else {
8675 None
8676 }
8677 }
8678
8679 #[allow(irrefutable_let_patterns)]
8680 pub fn into_set_weak_ok(
8681 self,
8682 ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenControlHandle)> {
8683 if let BufferCollectionTokenRequest::SetWeakOk { payload, control_handle } = self {
8684 Some((payload, control_handle))
8685 } else {
8686 None
8687 }
8688 }
8689
8690 #[allow(irrefutable_let_patterns)]
8691 pub fn into_attach_node_tracking(
8692 self,
8693 ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenControlHandle)> {
8694 if let BufferCollectionTokenRequest::AttachNodeTracking { payload, control_handle } = self {
8695 Some((payload, control_handle))
8696 } else {
8697 None
8698 }
8699 }
8700
8701 #[allow(irrefutable_let_patterns)]
8702 pub fn into_duplicate_sync(
8703 self,
8704 ) -> Option<(
8705 BufferCollectionTokenDuplicateSyncRequest,
8706 BufferCollectionTokenDuplicateSyncResponder,
8707 )> {
8708 if let BufferCollectionTokenRequest::DuplicateSync { payload, responder } = self {
8709 Some((payload, responder))
8710 } else {
8711 None
8712 }
8713 }
8714
8715 #[allow(irrefutable_let_patterns)]
8716 pub fn into_duplicate(
8717 self,
8718 ) -> Option<(BufferCollectionTokenDuplicateRequest, BufferCollectionTokenControlHandle)> {
8719 if let BufferCollectionTokenRequest::Duplicate { payload, control_handle } = self {
8720 Some((payload, control_handle))
8721 } else {
8722 None
8723 }
8724 }
8725
8726 #[allow(irrefutable_let_patterns)]
8727 pub fn into_set_dispensable(self) -> Option<(BufferCollectionTokenControlHandle)> {
8728 if let BufferCollectionTokenRequest::SetDispensable { control_handle } = self {
8729 Some((control_handle))
8730 } else {
8731 None
8732 }
8733 }
8734
8735 #[allow(irrefutable_let_patterns)]
8736 pub fn into_create_buffer_collection_token_group(
8737 self,
8738 ) -> Option<(
8739 BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8740 BufferCollectionTokenControlHandle,
8741 )> {
8742 if let BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {
8743 payload,
8744 control_handle,
8745 } = self
8746 {
8747 Some((payload, control_handle))
8748 } else {
8749 None
8750 }
8751 }
8752
8753 /// Name of the method defined in FIDL
8754 pub fn method_name(&self) -> &'static str {
8755 match *self {
8756 BufferCollectionTokenRequest::Sync { .. } => "sync",
8757 BufferCollectionTokenRequest::Release { .. } => "release",
8758 BufferCollectionTokenRequest::SetName { .. } => "set_name",
8759 BufferCollectionTokenRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
8760 BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline { .. } => {
8761 "set_debug_timeout_log_deadline"
8762 }
8763 BufferCollectionTokenRequest::SetVerboseLogging { .. } => "set_verbose_logging",
8764 BufferCollectionTokenRequest::GetNodeRef { .. } => "get_node_ref",
8765 BufferCollectionTokenRequest::IsAlternateFor { .. } => "is_alternate_for",
8766 BufferCollectionTokenRequest::GetBufferCollectionId { .. } => {
8767 "get_buffer_collection_id"
8768 }
8769 BufferCollectionTokenRequest::SetWeak { .. } => "set_weak",
8770 BufferCollectionTokenRequest::SetWeakOk { .. } => "set_weak_ok",
8771 BufferCollectionTokenRequest::AttachNodeTracking { .. } => "attach_node_tracking",
8772 BufferCollectionTokenRequest::DuplicateSync { .. } => "duplicate_sync",
8773 BufferCollectionTokenRequest::Duplicate { .. } => "duplicate",
8774 BufferCollectionTokenRequest::SetDispensable { .. } => "set_dispensable",
8775 BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup { .. } => {
8776 "create_buffer_collection_token_group"
8777 }
8778 BufferCollectionTokenRequest::_UnknownMethod {
8779 method_type: fidl::MethodType::OneWay,
8780 ..
8781 } => "unknown one-way method",
8782 BufferCollectionTokenRequest::_UnknownMethod {
8783 method_type: fidl::MethodType::TwoWay,
8784 ..
8785 } => "unknown two-way method",
8786 }
8787 }
8788}
8789
/// Server-side handle for a `BufferCollectionToken` connection.
///
/// Cloning is cheap: clones share the same channel-serving state through the
/// inner `Arc`.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenControlHandle {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
8794
// Standard control-handle plumbing: every operation delegates to the shared
// `ServeInner` (and its underlying channel).
impl fidl::endpoints::ControlHandle for BufferCollectionTokenControlHandle {
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Only available on Fuchsia targets, where zircon channel peers exist.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
8821
// No protocol-specific event senders are generated for this protocol, so the
// inherent impl is intentionally empty.
impl BufferCollectionTokenControlHandle {}
8823
/// Responder for the two-way `BufferCollectionToken.Sync` method.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenSyncResponder {
    // Wrapped in `ManuallyDrop` so `Drop`/`drop_without_shutdown` control
    // exactly when the handle is released.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request; passed to `inner.send` when replying.
    tx_id: u32,
}
8830
/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
8841
// Responder trait wiring; `drop_without_shutdown` must release the
// `ManuallyDrop` handle manually because `mem::forget` suppresses `Drop`.
impl fidl::endpoints::Responder for BufferCollectionTokenSyncResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8856
8857impl BufferCollectionTokenSyncResponder {
8858 /// Sends a response to the FIDL transaction.
8859 ///
8860 /// Sets the channel to shutdown if an error occurs.
8861 pub fn send(self) -> Result<(), fidl::Error> {
8862 let _result = self.send_raw();
8863 if _result.is_err() {
8864 self.control_handle.shutdown();
8865 }
8866 self.drop_without_shutdown();
8867 _result
8868 }
8869
8870 /// Similar to "send" but does not shutdown the channel if an error occurs.
8871 pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
8872 let _result = self.send_raw();
8873 self.drop_without_shutdown();
8874 _result
8875 }
8876
8877 fn send_raw(&self) -> Result<(), fidl::Error> {
8878 self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
8879 fidl::encoding::Flexible::new(()),
8880 self.tx_id,
8881 0x11ac2555cf575b54,
8882 fidl::encoding::DynamicFlags::FLEXIBLE,
8883 )
8884 }
8885}
8886
/// Responder for the two-way `BufferCollectionToken.GetNodeRef` method.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGetNodeRefResponder {
    // Wrapped in `ManuallyDrop` so `Drop`/`drop_without_shutdown` control
    // exactly when the handle is released.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request; passed to `inner.send` when replying.
    tx_id: u32,
}
8893
/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
8904
// Responder trait wiring; `drop_without_shutdown` must release the
// `ManuallyDrop` handle manually because `mem::forget` suppresses `Drop`.
impl fidl::endpoints::Responder for BufferCollectionTokenGetNodeRefResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8919
8920impl BufferCollectionTokenGetNodeRefResponder {
8921 /// Sends a response to the FIDL transaction.
8922 ///
8923 /// Sets the channel to shutdown if an error occurs.
8924 pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8925 let _result = self.send_raw(payload);
8926 if _result.is_err() {
8927 self.control_handle.shutdown();
8928 }
8929 self.drop_without_shutdown();
8930 _result
8931 }
8932
8933 /// Similar to "send" but does not shutdown the channel if an error occurs.
8934 pub fn send_no_shutdown_on_err(
8935 self,
8936 mut payload: NodeGetNodeRefResponse,
8937 ) -> Result<(), fidl::Error> {
8938 let _result = self.send_raw(payload);
8939 self.drop_without_shutdown();
8940 _result
8941 }
8942
8943 fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8944 self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
8945 fidl::encoding::Flexible::new(&mut payload),
8946 self.tx_id,
8947 0x5b3d0e51614df053,
8948 fidl::encoding::DynamicFlags::FLEXIBLE,
8949 )
8950 }
8951}
8952
/// Responder for the two-way `BufferCollectionToken.IsAlternateFor` method.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenIsAlternateForResponder {
    // Wrapped in `ManuallyDrop` so `Drop`/`drop_without_shutdown` control
    // exactly when the handle is released.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request; passed to `inner.send` when replying.
    tx_id: u32,
}
8959
/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
8970
// Responder trait wiring; `drop_without_shutdown` must release the
// `ManuallyDrop` handle manually because `mem::forget` suppresses `Drop`.
impl fidl::endpoints::Responder for BufferCollectionTokenIsAlternateForResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8985
8986impl BufferCollectionTokenIsAlternateForResponder {
8987 /// Sends a response to the FIDL transaction.
8988 ///
8989 /// Sets the channel to shutdown if an error occurs.
8990 pub fn send(
8991 self,
8992 mut result: Result<&NodeIsAlternateForResponse, Error>,
8993 ) -> Result<(), fidl::Error> {
8994 let _result = self.send_raw(result);
8995 if _result.is_err() {
8996 self.control_handle.shutdown();
8997 }
8998 self.drop_without_shutdown();
8999 _result
9000 }
9001
9002 /// Similar to "send" but does not shutdown the channel if an error occurs.
9003 pub fn send_no_shutdown_on_err(
9004 self,
9005 mut result: Result<&NodeIsAlternateForResponse, Error>,
9006 ) -> Result<(), fidl::Error> {
9007 let _result = self.send_raw(result);
9008 self.drop_without_shutdown();
9009 _result
9010 }
9011
9012 fn send_raw(
9013 &self,
9014 mut result: Result<&NodeIsAlternateForResponse, Error>,
9015 ) -> Result<(), fidl::Error> {
9016 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
9017 NodeIsAlternateForResponse,
9018 Error,
9019 >>(
9020 fidl::encoding::FlexibleResult::new(result),
9021 self.tx_id,
9022 0x3a58e00157e0825,
9023 fidl::encoding::DynamicFlags::FLEXIBLE,
9024 )
9025 }
9026}
9027
/// Responder for the two-way `BufferCollectionToken.GetBufferCollectionId`
/// method.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGetBufferCollectionIdResponder {
    // Wrapped in `ManuallyDrop` so `Drop`/`drop_without_shutdown` control
    // exactly when the handle is released.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request; passed to `inner.send` when replying.
    tx_id: u32,
}
9034
/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
9045
// Responder trait wiring; `drop_without_shutdown` must release the
// `ManuallyDrop` handle manually because `mem::forget` suppresses `Drop`.
impl fidl::endpoints::Responder for BufferCollectionTokenGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
9060
9061impl BufferCollectionTokenGetBufferCollectionIdResponder {
9062 /// Sends a response to the FIDL transaction.
9063 ///
9064 /// Sets the channel to shutdown if an error occurs.
9065 pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
9066 let _result = self.send_raw(payload);
9067 if _result.is_err() {
9068 self.control_handle.shutdown();
9069 }
9070 self.drop_without_shutdown();
9071 _result
9072 }
9073
9074 /// Similar to "send" but does not shutdown the channel if an error occurs.
9075 pub fn send_no_shutdown_on_err(
9076 self,
9077 mut payload: &NodeGetBufferCollectionIdResponse,
9078 ) -> Result<(), fidl::Error> {
9079 let _result = self.send_raw(payload);
9080 self.drop_without_shutdown();
9081 _result
9082 }
9083
9084 fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
9085 self.control_handle
9086 .inner
9087 .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
9088 fidl::encoding::Flexible::new(payload),
9089 self.tx_id,
9090 0x77d19a494b78ba8c,
9091 fidl::encoding::DynamicFlags::FLEXIBLE,
9092 )
9093 }
9094}
9095
/// Responder for the two-way `BufferCollectionToken.DuplicateSync` method.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenDuplicateSyncResponder {
    // Wrapped in `ManuallyDrop` so `Drop`/`drop_without_shutdown` control
    // exactly when the handle is released.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request; passed to `inner.send` when replying.
    tx_id: u32,
}
9102
/// Sets the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenDuplicateSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
9113
// Responder trait wiring; `drop_without_shutdown` must release the
// `ManuallyDrop` handle manually because `mem::forget` suppresses `Drop`.
impl fidl::endpoints::Responder for BufferCollectionTokenDuplicateSyncResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
9128
9129impl BufferCollectionTokenDuplicateSyncResponder {
9130 /// Sends a response to the FIDL transaction.
9131 ///
9132 /// Sets the channel to shutdown if an error occurs.
9133 pub fn send(
9134 self,
9135 mut payload: BufferCollectionTokenDuplicateSyncResponse,
9136 ) -> Result<(), fidl::Error> {
9137 let _result = self.send_raw(payload);
9138 if _result.is_err() {
9139 self.control_handle.shutdown();
9140 }
9141 self.drop_without_shutdown();
9142 _result
9143 }
9144
9145 /// Similar to "send" but does not shutdown the channel if an error occurs.
9146 pub fn send_no_shutdown_on_err(
9147 self,
9148 mut payload: BufferCollectionTokenDuplicateSyncResponse,
9149 ) -> Result<(), fidl::Error> {
9150 let _result = self.send_raw(payload);
9151 self.drop_without_shutdown();
9152 _result
9153 }
9154
9155 fn send_raw(
9156 &self,
9157 mut payload: BufferCollectionTokenDuplicateSyncResponse,
9158 ) -> Result<(), fidl::Error> {
9159 self.control_handle.inner.send::<fidl::encoding::FlexibleType<
9160 BufferCollectionTokenDuplicateSyncResponse,
9161 >>(
9162 fidl::encoding::Flexible::new(&mut payload),
9163 self.tx_id,
9164 0x1c1af9919d1ca45c,
9165 fidl::encoding::DynamicFlags::FLEXIBLE,
9166 )
9167 }
9168}
9169
/// Zero-sized marker type identifying the `BufferCollectionTokenGroup`
/// protocol; ties together its proxy, request-stream, and synchronous-proxy
/// types via the `ProtocolMarker` impl below.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionTokenGroupMarker;
9172
impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenGroupMarker {
    type Proxy = BufferCollectionTokenGroupProxy;
    type RequestStream = BufferCollectionTokenGroupRequestStream;
    // The synchronous proxy only exists on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionTokenGroupSynchronousProxy;

    // NOTE(review): "(anonymous)" presumably indicates the protocol is not
    // discoverable by name; this string is used in debug/error output.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionTokenGroup";
}
9181
/// Client-side interface for the `BufferCollectionTokenGroup` protocol.
///
/// One-way methods return `Result<(), fidl::Error>` immediately; two-way
/// methods return a future resolving to the decoded response. The associated
/// `*ResponseFut` types let implementations choose their own future types.
pub trait BufferCollectionTokenGroupProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    fn r#create_child(
        &self,
        payload: BufferCollectionTokenGroupCreateChildRequest,
    ) -> Result<(), fidl::Error>;
    type CreateChildrenSyncResponseFut: std::future::Future<
            Output = Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error>,
        > + Send;
    fn r#create_children_sync(
        &self,
        payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
    ) -> Self::CreateChildrenSyncResponseFut;
    fn r#all_children_present(&self) -> Result<(), fidl::Error>;
}
/// Blocking (synchronous) client for the `BufferCollectionTokenGroup`
/// protocol; only available on Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionTokenGroupSynchronousProxy {
    client: fidl::client::sync::Client,
}
9232
// Channel conversion plumbing: the proxy is a thin wrapper around the
// underlying zircon channel held by the sync client.
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenGroupSynchronousProxy {
    type Proxy = BufferCollectionTokenGroupProxy;
    type Protocol = BufferCollectionTokenGroupMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
9250
9251#[cfg(target_os = "fuchsia")]
9252impl BufferCollectionTokenGroupSynchronousProxy {
9253 pub fn new(channel: fidl::Channel) -> Self {
9254 Self { client: fidl::client::sync::Client::new(channel) }
9255 }
9256
9257 pub fn into_channel(self) -> fidl::Channel {
9258 self.client.into_channel()
9259 }
9260
9261 /// Waits until an event arrives and returns it. It is safe for other
9262 /// threads to make concurrent requests while waiting for an event.
9263 pub fn wait_for_event(
9264 &self,
9265 deadline: zx::MonotonicInstant,
9266 ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
9267 BufferCollectionTokenGroupEvent::decode(
9268 self.client.wait_for_event::<BufferCollectionTokenGroupMarker>(deadline)?,
9269 )
9270 }
9271
9272 /// Ensure that previous messages have been received server side. This is
9273 /// particularly useful after previous messages that created new tokens,
9274 /// because a token must be known to the sysmem server before sending the
9275 /// token to another participant.
9276 ///
9277 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9278 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9279 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9280 /// to mitigate the possibility of a hostile/fake
9281 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9282 /// Another way is to pass the token to
9283 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
9284 /// the token as part of exchanging it for a
9285 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9286 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9287 /// of stalling.
9288 ///
9289 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9290 /// and then starting and completing a `Sync`, it's then safe to send the
9291 /// `BufferCollectionToken` client ends to other participants knowing the
9292 /// server will recognize the tokens when they're sent by the other
9293 /// participants to sysmem in a
9294 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
9295 /// efficient way to create tokens while avoiding unnecessary round trips.
9296 ///
9297 /// Other options include waiting for each
9298 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
9299 /// individually (using separate call to `Sync` after each), or calling
9300 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
9301 /// converted to a `BufferCollection` via
9302 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
9303 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
9304 /// the sync step and can create multiple tokens at once.
9305 pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
9306 let _response = self.client.send_query::<
9307 fidl::encoding::EmptyPayload,
9308 fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
9309 BufferCollectionTokenGroupMarker,
9310 >(
9311 (),
9312 0x11ac2555cf575b54,
9313 fidl::encoding::DynamicFlags::FLEXIBLE,
9314 ___deadline,
9315 )?
9316 .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
9317 Ok(_response)
9318 }
9319
9320 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
9321 ///
9322 /// Normally a participant will convert a `BufferCollectionToken` into a
9323 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
9324 /// `Release` via the token (and then close the channel immediately or
9325 /// shortly later in response to server closing the server end), which
9326 /// avoids causing buffer collection failure. Without a prior `Release`,
9327 /// closing the `BufferCollectionToken` client end will cause buffer
9328 /// collection failure.
9329 ///
9330 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
9331 ///
9332 /// By default the server handles unexpected closure of a
9333 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
9334 /// first) by failing the buffer collection. Partly this is to expedite
9335 /// closing VMO handles to reclaim memory when any participant fails. If a
9336 /// participant would like to cleanly close a `BufferCollection` without
9337 /// causing buffer collection failure, the participant can send `Release`
9338 /// before closing the `BufferCollection` client end. The `Release` can
9339 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
9340 /// buffer collection won't require constraints from this node in order to
9341 /// allocate. If after `SetConstraints`, the constraints are retained and
9342 /// aggregated, despite the lack of `BufferCollection` connection at the
9343 /// time of constraints aggregation.
9344 ///
9345 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
9346 ///
9347 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
9348 /// end (without `Release` first) will trigger failure of the buffer
9349 /// collection. To close a `BufferCollectionTokenGroup` channel without
9350 /// failing the buffer collection, ensure that AllChildrenPresent() has been
9351 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
9352 /// client end.
9353 ///
9354 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
9356 /// buffer collection will fail (triggered by reception of `Release` without
9357 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
9358 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
9359 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
9360 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
9361 /// close requires `AllChildrenPresent` (if not already sent), then
9362 /// `Release`, then close client end.
9363 ///
9364 /// If `Release` occurs after `AllChildrenPresent`, the children and all
9365 /// their constraints remain intact (just as they would if the
9366 /// `BufferCollectionTokenGroup` channel had remained open), and the client
9367 /// end close doesn't trigger buffer collection failure.
9368 ///
9369 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
9370 ///
9371 /// For brevity, the per-channel-protocol paragraphs above ignore the
9372 /// separate failure domain created by
9373 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
9374 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
9375 /// unexpectedly closes (without `Release` first) and that client end is
9376 /// under a failure domain, instead of failing the whole buffer collection,
9377 /// the failure domain is failed, but the buffer collection itself is
9378 /// isolated from failure of the failure domain. Such failure domains can be
9379 /// nested, in which case only the inner-most failure domain in which the
9380 /// `Node` resides fails.
9381 pub fn r#release(&self) -> Result<(), fidl::Error> {
9382 self.client.send::<fidl::encoding::EmptyPayload>(
9383 (),
9384 0x6a5cae7d6d6e04c6,
9385 fidl::encoding::DynamicFlags::FLEXIBLE,
9386 )
9387 }
9388
9389 /// Set a name for VMOs in this buffer collection.
9390 ///
9391 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
9392 /// will be truncated to fit. The name of the vmo will be suffixed with the
9393 /// buffer index within the collection (if the suffix fits within
9394 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
9395 /// listed in the inspect data.
9396 ///
9397 /// The name only affects VMOs allocated after the name is set; this call
9398 /// does not rename existing VMOs. If multiple clients set different names
9399 /// then the larger priority value will win. Setting a new name with the
9400 /// same priority as a prior name doesn't change the name.
9401 ///
9402 /// All table fields are currently required.
9403 ///
9404 /// + request `priority` The name is only set if this is the first `SetName`
9405 /// or if `priority` is greater than any previous `priority` value in
9406 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
9407 /// + request `name` The name for VMOs created under this buffer collection.
9408 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
9409 self.client.send::<NodeSetNameRequest>(
9410 payload,
9411 0xb41f1624f48c1e9,
9412 fidl::encoding::DynamicFlags::FLEXIBLE,
9413 )
9414 }
9415
9416 /// Set information about the current client that can be used by sysmem to
9417 /// help diagnose leaking memory and allocation stalls waiting for a
9418 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
9419 ///
9420 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
9421 /// `Node`(s) derived from this `Node`, unless overriden by
9422 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
9423 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
9424 ///
9425 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
9426 /// `Allocator` is the most efficient way to ensure that all
9427 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
9428 /// set, and is also more efficient than separately sending the same debug
9429 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
9430 /// created [`fuchsia.sysmem2/Node`].
9431 ///
9432 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
9433 /// indicate which client is closing their channel first, leading to subtree
9434 /// failure (which can be normal if the purpose of the subtree is over, but
9435 /// if happening earlier than expected, the client-channel-specific name can
9436 /// help diagnose where the failure is first coming from, from sysmem's
9437 /// point of view).
9438 ///
9439 /// All table fields are currently required.
9440 ///
9441 /// + request `name` This can be an arbitrary string, but the current
9442 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
9443 /// + request `id` This can be an arbitrary id, but the current process ID
9444 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
9445 pub fn r#set_debug_client_info(
9446 &self,
9447 mut payload: &NodeSetDebugClientInfoRequest,
9448 ) -> Result<(), fidl::Error> {
9449 self.client.send::<NodeSetDebugClientInfoRequest>(
9450 payload,
9451 0x5cde8914608d99b1,
9452 fidl::encoding::DynamicFlags::FLEXIBLE,
9453 )
9454 }
9455
9456 /// Sysmem logs a warning if sysmem hasn't seen
9457 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
9458 /// within 5 seconds after creation of a new collection.
9459 ///
9460 /// Clients can call this method to change when the log is printed. If
9461 /// multiple client set the deadline, it's unspecified which deadline will
9462 /// take effect.
9463 ///
9464 /// In most cases the default works well.
9465 ///
9466 /// All table fields are currently required.
9467 ///
9468 /// + request `deadline` The time at which sysmem will start trying to log
9469 /// the warning, unless all constraints are with sysmem by then.
9470 pub fn r#set_debug_timeout_log_deadline(
9471 &self,
9472 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
9473 ) -> Result<(), fidl::Error> {
9474 self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
9475 payload,
9476 0x716b0af13d5c0806,
9477 fidl::encoding::DynamicFlags::FLEXIBLE,
9478 )
9479 }
9480
9481 /// This enables verbose logging for the buffer collection.
9482 ///
9483 /// Verbose logging includes constraints set via
9484 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
9485 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
9486 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
9487 /// the tree of `Node`(s).
9488 ///
9489 /// Normally sysmem prints only a single line complaint when aggregation
9490 /// fails, with just the specific detailed reason that aggregation failed,
9491 /// with little surrounding context. While this is often enough to diagnose
9492 /// a problem if only a small change was made and everything was working
9493 /// before the small change, it's often not particularly helpful for getting
9494 /// a new buffer collection to work for the first time. Especially with
9495 /// more complex trees of nodes, involving things like
9496 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
9497 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
9498 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
9499 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
9500 /// looks like and why it's failing a logical allocation, or why a tree or
9501 /// subtree is failing sooner than expected.
9502 ///
9503 /// The intent of the extra logging is to be acceptable from a performance
9504 /// point of view, under the assumption that verbose logging is only enabled
9505 /// on a low number of buffer collections. If we're not tracking down a bug,
9506 /// we shouldn't send this message.
9507 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
9508 self.client.send::<fidl::encoding::EmptyPayload>(
9509 (),
9510 0x5209c77415b4dfad,
9511 fidl::encoding::DynamicFlags::FLEXIBLE,
9512 )
9513 }
9514
9515 /// This gets a handle that can be used as a parameter to
9516 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
9517 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
9518 /// client obtained this handle from this `Node`.
9519 ///
9520 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
9521 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
9522 /// despite the two calls typically being on different channels.
9523 ///
9524 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
9525 ///
9526 /// All table fields are currently required.
9527 ///
9528 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
9529 /// different `Node` channel, to prove that the client obtained the handle
9530 /// from this `Node`.
9531 pub fn r#get_node_ref(
9532 &self,
9533 ___deadline: zx::MonotonicInstant,
9534 ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
9535 let _response = self.client.send_query::<
9536 fidl::encoding::EmptyPayload,
9537 fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
9538 BufferCollectionTokenGroupMarker,
9539 >(
9540 (),
9541 0x5b3d0e51614df053,
9542 fidl::encoding::DynamicFlags::FLEXIBLE,
9543 ___deadline,
9544 )?
9545 .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
9546 Ok(_response)
9547 }
9548
9549 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
9550 /// rooted at a different child token of a common parent
9551 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
9552 /// passed-in `node_ref`.
9553 ///
9554 /// This call is for assisting with admission control de-duplication, and
9555 /// with debugging.
9556 ///
9557 /// The `node_ref` must be obtained using
9558 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
9559 ///
9560 /// The `node_ref` can be a duplicated handle; it's not necessary to call
9561 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
9562 ///
9563 /// If a calling token may not actually be a valid token at all due to a
9564 /// potentially hostile/untrusted provider of the token, call
9565 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
9566 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
9567 /// never responds due to a calling token not being a real token (not really
9568 /// talking to sysmem). Another option is to call
9569 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
9570 /// which also validates the token along with converting it to a
9571 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
9572 ///
9573 /// All table fields are currently required.
9574 ///
9575 /// - response `is_alternate`
9576 /// - true: The first parent node in common between the calling node and
9577 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
9578 /// that the calling `Node` and the `node_ref` `Node` will not have both
9579 /// their constraints apply - rather sysmem will choose one or the other
9580 /// of the constraints - never both. This is because only one child of
9581 /// a `BufferCollectionTokenGroup` is selected during logical
9582 /// allocation, with only that one child's subtree contributing to
9583 /// constraints aggregation.
9584 /// - false: The first parent node in common between the calling `Node`
9585 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
9586 /// Currently, this means the first parent node in common is a
9587 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
9588 /// `Release`ed). This means that the calling `Node` and the `node_ref`
9589 /// `Node` may have both their constraints apply during constraints
9590 /// aggregation of the logical allocation, if both `Node`(s) are
9591 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
9592 /// this case, there is no `BufferCollectionTokenGroup` that will
9593 /// directly prevent the two `Node`(s) from both being selected and
9594 /// their constraints both aggregated, but even when false, one or both
9595 /// `Node`(s) may still be eliminated from consideration if one or both
9596 /// `Node`(s) has a direct or indirect parent
9597 /// `BufferCollectionTokenGroup` which selects a child subtree other
9598 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
9599 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
9600 /// associated with the same buffer collection as the calling `Node`.
9601 /// Another reason for this error is if the `node_ref` is an
9602 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
9603 /// a real `node_ref` obtained from `GetNodeRef`.
9604 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
9605 /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle , or doesn't have
9606 /// the needed rights expected on a real `node_ref`.
9607 /// * No other failing status codes are returned by this call. However,
9608 /// sysmem may add additional codes in future, so the client should have
9609 /// sensible default handling for any failing status code.
9610 pub fn r#is_alternate_for(
9611 &self,
9612 mut payload: NodeIsAlternateForRequest,
9613 ___deadline: zx::MonotonicInstant,
9614 ) -> Result<NodeIsAlternateForResult, fidl::Error> {
9615 let _response = self.client.send_query::<
9616 NodeIsAlternateForRequest,
9617 fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
9618 BufferCollectionTokenGroupMarker,
9619 >(
9620 &mut payload,
9621 0x3a58e00157e0825,
9622 fidl::encoding::DynamicFlags::FLEXIBLE,
9623 ___deadline,
9624 )?
9625 .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
9626 Ok(_response.map(|x| x))
9627 }
9628
9629 /// Get the buffer collection ID. This ID is also available from
9630 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
9631 /// within the collection).
9632 ///
9633 /// This call is mainly useful in situations where we can't convey a
9634 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
9635 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
9636 /// handle, which can be joined back up with a `BufferCollection` client end
9637 /// that was created via a different path. Prefer to convey a
9638 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
9639 ///
9640 /// Trusting a `buffer_collection_id` value from a source other than sysmem
9641 /// is analogous to trusting a koid value from a source other than zircon.
9642 /// Both should be avoided unless really necessary, and both require
9643 /// caution. In some situations it may be reasonable to refer to a
9644 /// pre-established `BufferCollection` by `buffer_collection_id` via a
9645 /// protocol for efficiency reasons, but an incoming value purporting to be
9646 /// a `buffer_collection_id` is not sufficient alone to justify granting the
9647 /// sender of the `buffer_collection_id` any capability. The sender must
9648 /// first prove to a receiver that the sender has/had a VMO or has/had a
9649 /// `BufferCollectionToken` to the same collection by sending a handle that
9650 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
9651 /// `buffer_collection_id` value. The receiver should take care to avoid
9652 /// assuming that a sender had a `BufferCollectionToken` in cases where the
9653 /// sender has only proven that the sender had a VMO.
9654 ///
9655 /// - response `buffer_collection_id` This ID is unique per buffer
9656 /// collection per boot. Each buffer is uniquely identified by the
9657 /// `buffer_collection_id` and `buffer_index` together.
9658 pub fn r#get_buffer_collection_id(
9659 &self,
9660 ___deadline: zx::MonotonicInstant,
9661 ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
9662 let _response = self.client.send_query::<
9663 fidl::encoding::EmptyPayload,
9664 fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
9665 BufferCollectionTokenGroupMarker,
9666 >(
9667 (),
9668 0x77d19a494b78ba8c,
9669 fidl::encoding::DynamicFlags::FLEXIBLE,
9670 ___deadline,
9671 )?
9672 .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
9673 Ok(_response)
9674 }
9675
9676 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
9677 /// created after this message to weak, which means that a client's `Node`
9678 /// client end (or a child created after this message) is not alone
9679 /// sufficient to keep allocated VMOs alive.
9680 ///
9681 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
9682 /// `close_weak_asap`.
9683 ///
9684 /// This message is only permitted before the `Node` becomes ready for
9685 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
9686 /// * `BufferCollectionToken`: any time
9687 /// * `BufferCollection`: before `SetConstraints`
9688 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
9689 ///
9690 /// Currently, no conversion from strong `Node` to weak `Node` after ready
9691 /// for allocation is provided, but a client can simulate that by creating
9692 /// an additional `Node` before allocation and setting that additional
9693 /// `Node` to weak, and then potentially at some point later sending
9694 /// `Release` and closing the client end of the client's strong `Node`, but
9695 /// keeping the client's weak `Node`.
9696 ///
9697 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
9698 /// collection failure (all `Node` client end(s) will see
9699 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
9700 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
9701 /// this situation until all `Node`(s) are ready for allocation. For initial
9702 /// allocation to succeed, at least one strong `Node` is required to exist
9703 /// at allocation time, but after that client receives VMO handles, that
9704 /// client can `BufferCollection.Release` and close the client end without
9705 /// causing this type of failure.
9706 ///
9707 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
9708 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
9709 /// separately as appropriate.
9710 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
9711 self.client.send::<fidl::encoding::EmptyPayload>(
9712 (),
9713 0x22dd3ea514eeffe1,
9714 fidl::encoding::DynamicFlags::FLEXIBLE,
9715 )
9716 }
9717
9718 /// This indicates to sysmem that the client is prepared to pay attention to
9719 /// `close_weak_asap`.
9720 ///
9721 /// If sent, this message must be before
9722 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
9723 ///
9724 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
9725 /// send this message before `WaitForAllBuffersAllocated`, or a parent
9726 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
9727 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
9728 /// trigger buffer collection failure.
9729 ///
9730 /// This message is necessary because weak sysmem VMOs have not always been
9731 /// a thing, so older clients are not aware of the need to pay attention to
9732 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
9733 /// sysmem weak VMO handles asap. By having this message and requiring
9734 /// participants to indicate their acceptance of this aspect of the overall
9735 /// protocol, we avoid situations where an older client is delivered a weak
9736 /// VMO without any way for sysmem to get that VMO to close quickly later
9737 /// (and on a per-buffer basis).
9738 ///
9739 /// A participant that doesn't handle `close_weak_asap` and also doesn't
9740 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
9741 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
9742 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
9743 /// same participant has a child/delegate which does retrieve VMOs, that
9744 /// child/delegate will need to send `SetWeakOk` before
9745 /// `WaitForAllBuffersAllocated`.
9746 ///
9747 /// + request `for_child_nodes_also` If present and true, this means direct
9748 /// child nodes of this node created after this message plus all
9749 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
9750 /// those nodes. Any child node of this node that was created before this
9751 /// message is not included. This setting is "sticky" in the sense that a
9752 /// subsequent `SetWeakOk` without this bool set to true does not reset
9753 /// the server-side bool. If this creates a problem for a participant, a
9754 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
9755 /// tokens instead, as appropriate. A participant should only set
9756 /// `for_child_nodes_also` true if the participant can really promise to
9757 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
9758 /// weak VMO handles held by participants holding the corresponding child
9759 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
9760 /// which are using sysmem(1) can be weak, despite the clients of those
9761 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
9762 /// direct way to find out about `close_weak_asap`. This only applies to
9763 /// descendents of this `Node` which are using sysmem(1), not to this
9764 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
9765 /// token, which will fail allocation unless an ancestor of this `Node`
9766 /// specified `for_child_nodes_also` true.
9767 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
9768 self.client.send::<NodeSetWeakOkRequest>(
9769 &mut payload,
9770 0x38a44fc4d7724be9,
9771 fidl::encoding::DynamicFlags::FLEXIBLE,
9772 )
9773 }
9774
9775 /// The server_end will be closed after this `Node` and any child nodes have
9776 /// have released their buffer counts, making those counts available for
9777 /// reservation by a different `Node` via
9778 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
9779 ///
9780 /// The `Node` buffer counts may not be released until the entire tree of
9781 /// `Node`(s) is closed or failed, because
9782 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
9783 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
9784 /// `Node` buffer counts remain reserved until the orphaned node is later
9785 /// cleaned up.
9786 ///
9787 /// If the `Node` exceeds a fairly large number of attached eventpair server
9788 /// ends, a log message will indicate this and the `Node` (and the
9789 /// appropriate) sub-tree will fail.
9790 ///
9791 /// The `server_end` will remain open when
9792 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
9793 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
9794 /// [`fuchsia.sysmem2/BufferCollection`].
9795 ///
9796 /// This message can also be used with a
9797 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
9798 pub fn r#attach_node_tracking(
9799 &self,
9800 mut payload: NodeAttachNodeTrackingRequest,
9801 ) -> Result<(), fidl::Error> {
9802 self.client.send::<NodeAttachNodeTrackingRequest>(
9803 &mut payload,
9804 0x3f22f2a293d3cdac,
9805 fidl::encoding::DynamicFlags::FLEXIBLE,
9806 )
9807 }
9808
9809 /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
9810 /// (including its children) will be selected during allocation (or logical
9811 /// allocation).
9812 ///
9813 /// Before passing the client end of this token to
9814 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
9815 /// [`fuchsia.sysmem2/Node.Sync`] after
9816 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
9817 /// Or the client can use
9818 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
9819 /// essentially includes the `Sync`.
9820 ///
9821 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9822 /// fail the group's subtree and close the connection.
9823 ///
9824 /// After all children have been created, send AllChildrenPresent.
9825 ///
9826 /// + request `token_request` The server end of the new token channel.
9827 /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
9828 /// token allows the holder to get the same rights to buffers as the
9829 /// parent token (of the group) had. When the value isn't
9830 /// ZX_RIGHT_SAME_RIGHTS, the value is interpretted as a bitmask with 0
9831 /// bits ensuring those rights are attentuated, so 0xFFFFFFFF is a synonym
9832 /// for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
9833 /// causes subtree failure.
9834 pub fn r#create_child(
9835 &self,
9836 mut payload: BufferCollectionTokenGroupCreateChildRequest,
9837 ) -> Result<(), fidl::Error> {
9838 self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
9839 &mut payload,
9840 0x41a0075d419f30c5,
9841 fidl::encoding::DynamicFlags::FLEXIBLE,
9842 )
9843 }
9844
9845 /// Create 1 or more child tokens at once, synchronously. In contrast to
9846 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
9847 /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
9848 /// of a returned token to
9849 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
9850 ///
9851 /// The lower-index child tokens are higher priority (attempted sooner) than
9852 /// higher-index child tokens.
9853 ///
9854 /// As per all child tokens, successful aggregation will choose exactly one
9855 /// child among all created children (across all children created across
9856 /// potentially multiple calls to
9857 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
9858 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
9859 ///
9860 /// The maximum permissible total number of children per group, and total
9861 /// number of nodes in an overall tree (from the root) are capped to limits
9862 /// which are not configurable via these protocols.
9863 ///
9864 /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
9865 /// this will fail the group's subtree and close the connection.
9866 ///
9867 /// After all children have been created, send AllChildrenPresent.
9868 ///
9869 /// + request `rights_attentuation_masks` The size of the
9870 /// `rights_attentuation_masks` determines the number of created child
9871 /// tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
9872 /// The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
9873 /// other value, each 0 bit in the mask attenuates that right.
9874 /// - response `tokens` The created child tokens.
9875 pub fn r#create_children_sync(
9876 &self,
9877 mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
9878 ___deadline: zx::MonotonicInstant,
9879 ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
9880 let _response = self.client.send_query::<
9881 BufferCollectionTokenGroupCreateChildrenSyncRequest,
9882 fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
9883 BufferCollectionTokenGroupMarker,
9884 >(
9885 payload,
9886 0x15dea448c536070a,
9887 fidl::encoding::DynamicFlags::FLEXIBLE,
9888 ___deadline,
9889 )?
9890 .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
9891 Ok(_response)
9892 }
9893
9894 /// Indicate that no more children will be created.
9895 ///
9896 /// After creating all children, the client should send
9897 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
9898 /// inform sysmem that no more children will be created, so that sysmem can
9899 /// know when it's ok to start aggregating constraints.
9900 ///
9901 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9902 /// fail the group's subtree and close the connection.
9903 ///
9904 /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
9905 /// after `AllChildrenPresent`, else failure of the group's subtree will be
9906 /// triggered. This is intentionally not analogous to how `Release` without
9907 /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
9908 /// subtree failure.
9909 pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
9910 self.client.send::<fidl::encoding::EmptyPayload>(
9911 (),
9912 0x5c327e4a23391312,
9913 fidl::encoding::DynamicFlags::FLEXIBLE,
9914 )
9915 }
9916}
9917
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionTokenGroupSynchronousProxy> for zx::NullableHandle {
    /// Consumes the proxy and converts its underlying channel into a
    /// (nullable) handle.
    fn from(value: BufferCollectionTokenGroupSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
9924
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionTokenGroupSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionTokenGroupSynchronousProxy::new(value)
    }
}
9931
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionTokenGroupSynchronousProxy {
    type Protocol = BufferCollectionTokenGroupMarker;

    /// Builds a synchronous proxy from a typed client end by unwrapping its
    /// channel.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionTokenGroupMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
9940
/// Asynchronous client proxy for the
/// `fuchsia.sysmem2/BufferCollectionTokenGroup` protocol, wrapping a FIDL
/// client over the default Fuchsia resource dialect.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenGroupProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
9945
9946impl fidl::endpoints::Proxy for BufferCollectionTokenGroupProxy {
9947 type Protocol = BufferCollectionTokenGroupMarker;
9948
9949 fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
9950 Self::new(inner)
9951 }
9952
9953 fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
9954 self.client.into_channel().map_err(|client| Self { client })
9955 }
9956
9957 fn as_channel(&self) -> &::fidl::AsyncChannel {
9958 self.client.as_channel()
9959 }
9960}
9961
9962impl BufferCollectionTokenGroupProxy {
9963 /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionTokenGroup.
9964 pub fn new(channel: ::fidl::AsyncChannel) -> Self {
9965 let protocol_name =
9966 <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
9967 Self { client: fidl::client::Client::new(channel, protocol_name) }
9968 }
9969
9970 /// Get a Stream of events from the remote end of the protocol.
9971 ///
9972 /// # Panics
9973 ///
9974 /// Panics if the event stream was already taken.
9975 pub fn take_event_stream(&self) -> BufferCollectionTokenGroupEventStream {
9976 BufferCollectionTokenGroupEventStream { event_receiver: self.client.take_event_receiver() }
9977 }
9978
9979 /// Ensure that previous messages have been received server side. This is
9980 /// particularly useful after previous messages that created new tokens,
9981 /// because a token must be known to the sysmem server before sending the
9982 /// token to another participant.
9983 ///
9984 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9985 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9986 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9987 /// to mitigate the possibility of a hostile/fake
9988 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9989 /// Another way is to pass the token to
9990 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
9991 /// the token as part of exchanging it for a
9992 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9993 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9994 /// of stalling.
9995 ///
9996 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9997 /// and then starting and completing a `Sync`, it's then safe to send the
9998 /// `BufferCollectionToken` client ends to other participants knowing the
9999 /// server will recognize the tokens when they're sent by the other
10000 /// participants to sysmem in a
10001 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
10002 /// efficient way to create tokens while avoiding unnecessary round trips.
10003 ///
10004 /// Other options include waiting for each
10005 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
10006 /// individually (using separate call to `Sync` after each), or calling
10007 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
10008 /// converted to a `BufferCollection` via
10009 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
10010 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
10011 /// the sync step and can create multiple tokens at once.
10012 pub fn r#sync(
10013 &self,
10014 ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
10015 BufferCollectionTokenGroupProxyInterface::r#sync(self)
10016 }
10017
10018 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
10019 ///
10020 /// Normally a participant will convert a `BufferCollectionToken` into a
10021 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
10022 /// `Release` via the token (and then close the channel immediately or
10023 /// shortly later in response to server closing the server end), which
10024 /// avoids causing buffer collection failure. Without a prior `Release`,
10025 /// closing the `BufferCollectionToken` client end will cause buffer
10026 /// collection failure.
10027 ///
10028 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
10029 ///
10030 /// By default the server handles unexpected closure of a
10031 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
10032 /// first) by failing the buffer collection. Partly this is to expedite
10033 /// closing VMO handles to reclaim memory when any participant fails. If a
10034 /// participant would like to cleanly close a `BufferCollection` without
10035 /// causing buffer collection failure, the participant can send `Release`
10036 /// before closing the `BufferCollection` client end. The `Release` can
10037 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
10038 /// buffer collection won't require constraints from this node in order to
10039 /// allocate. If after `SetConstraints`, the constraints are retained and
10040 /// aggregated, despite the lack of `BufferCollection` connection at the
10041 /// time of constraints aggregation.
10042 ///
10043 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
10044 ///
10045 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
10046 /// end (without `Release` first) will trigger failure of the buffer
10047 /// collection. To close a `BufferCollectionTokenGroup` channel without
10048 /// failing the buffer collection, ensure that AllChildrenPresent() has been
10049 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
10050 /// client end.
10051 ///
10052 /// If `Release` occurs before
10053 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
10054 /// buffer collection will fail (triggered by reception of `Release` without
10055 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
10056 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
10057 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
10058 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
10059 /// close requires `AllChildrenPresent` (if not already sent), then
10060 /// `Release`, then close client end.
10061 ///
10062 /// If `Release` occurs after `AllChildrenPresent`, the children and all
10063 /// their constraints remain intact (just as they would if the
10064 /// `BufferCollectionTokenGroup` channel had remained open), and the client
10065 /// end close doesn't trigger buffer collection failure.
10066 ///
10067 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
10068 ///
10069 /// For brevity, the per-channel-protocol paragraphs above ignore the
10070 /// separate failure domain created by
10071 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
10072 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
10073 /// unexpectedly closes (without `Release` first) and that client end is
10074 /// under a failure domain, instead of failing the whole buffer collection,
10075 /// the failure domain is failed, but the buffer collection itself is
10076 /// isolated from failure of the failure domain. Such failure domains can be
10077 /// nested, in which case only the inner-most failure domain in which the
10078 /// `Node` resides fails.
10079 pub fn r#release(&self) -> Result<(), fidl::Error> {
10080 BufferCollectionTokenGroupProxyInterface::r#release(self)
10081 }
10082
10083 /// Set a name for VMOs in this buffer collection.
10084 ///
10085 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
10086 /// will be truncated to fit. The name of the vmo will be suffixed with the
10087 /// buffer index within the collection (if the suffix fits within
10088 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
10089 /// listed in the inspect data.
10090 ///
10091 /// The name only affects VMOs allocated after the name is set; this call
10092 /// does not rename existing VMOs. If multiple clients set different names
10093 /// then the larger priority value will win. Setting a new name with the
10094 /// same priority as a prior name doesn't change the name.
10095 ///
10096 /// All table fields are currently required.
10097 ///
10098 /// + request `priority` The name is only set if this is the first `SetName`
10099 /// or if `priority` is greater than any previous `priority` value in
10100 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
10101 /// + request `name` The name for VMOs created under this buffer collection.
10102 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
10103 BufferCollectionTokenGroupProxyInterface::r#set_name(self, payload)
10104 }
10105
10106 /// Set information about the current client that can be used by sysmem to
10107 /// help diagnose leaking memory and allocation stalls waiting for a
10108 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
10109 ///
10110 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
10112 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
10113 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
10114 ///
10115 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
10116 /// `Allocator` is the most efficient way to ensure that all
10117 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
10118 /// set, and is also more efficient than separately sending the same debug
10119 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
10120 /// created [`fuchsia.sysmem2/Node`].
10121 ///
10122 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
10123 /// indicate which client is closing their channel first, leading to subtree
10124 /// failure (which can be normal if the purpose of the subtree is over, but
10125 /// if happening earlier than expected, the client-channel-specific name can
10126 /// help diagnose where the failure is first coming from, from sysmem's
10127 /// point of view).
10128 ///
10129 /// All table fields are currently required.
10130 ///
10131 /// + request `name` This can be an arbitrary string, but the current
10132 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
10133 /// + request `id` This can be an arbitrary id, but the current process ID
10134 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
10135 pub fn r#set_debug_client_info(
10136 &self,
10137 mut payload: &NodeSetDebugClientInfoRequest,
10138 ) -> Result<(), fidl::Error> {
10139 BufferCollectionTokenGroupProxyInterface::r#set_debug_client_info(self, payload)
10140 }
10141
10142 /// Sysmem logs a warning if sysmem hasn't seen
10143 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
10144 /// within 5 seconds after creation of a new collection.
10145 ///
10146 /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
10148 /// take effect.
10149 ///
10150 /// In most cases the default works well.
10151 ///
10152 /// All table fields are currently required.
10153 ///
10154 /// + request `deadline` The time at which sysmem will start trying to log
10155 /// the warning, unless all constraints are with sysmem by then.
10156 pub fn r#set_debug_timeout_log_deadline(
10157 &self,
10158 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
10159 ) -> Result<(), fidl::Error> {
10160 BufferCollectionTokenGroupProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
10161 }
10162
10163 /// This enables verbose logging for the buffer collection.
10164 ///
10165 /// Verbose logging includes constraints set via
10166 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
10167 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
10168 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
10169 /// the tree of `Node`(s).
10170 ///
10171 /// Normally sysmem prints only a single line complaint when aggregation
10172 /// fails, with just the specific detailed reason that aggregation failed,
10173 /// with little surrounding context. While this is often enough to diagnose
10174 /// a problem if only a small change was made and everything was working
10175 /// before the small change, it's often not particularly helpful for getting
10176 /// a new buffer collection to work for the first time. Especially with
10177 /// more complex trees of nodes, involving things like
10178 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
10179 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
10180 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
10181 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
10182 /// looks like and why it's failing a logical allocation, or why a tree or
10183 /// subtree is failing sooner than expected.
10184 ///
10185 /// The intent of the extra logging is to be acceptable from a performance
10186 /// point of view, under the assumption that verbose logging is only enabled
10187 /// on a low number of buffer collections. If we're not tracking down a bug,
10188 /// we shouldn't send this message.
10189 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
10190 BufferCollectionTokenGroupProxyInterface::r#set_verbose_logging(self)
10191 }
10192
10193 /// This gets a handle that can be used as a parameter to
10194 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
10195 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
10196 /// client obtained this handle from this `Node`.
10197 ///
10198 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
10199 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
10200 /// despite the two calls typically being on different channels.
10201 ///
10202 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
10203 ///
10204 /// All table fields are currently required.
10205 ///
10206 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
10207 /// different `Node` channel, to prove that the client obtained the handle
10208 /// from this `Node`.
10209 pub fn r#get_node_ref(
10210 &self,
10211 ) -> fidl::client::QueryResponseFut<
10212 NodeGetNodeRefResponse,
10213 fidl::encoding::DefaultFuchsiaResourceDialect,
10214 > {
10215 BufferCollectionTokenGroupProxyInterface::r#get_node_ref(self)
10216 }
10217
10218 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
10219 /// rooted at a different child token of a common parent
10220 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
10221 /// passed-in `node_ref`.
10222 ///
10223 /// This call is for assisting with admission control de-duplication, and
10224 /// with debugging.
10225 ///
10226 /// The `node_ref` must be obtained using
10227 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
10228 ///
10229 /// The `node_ref` can be a duplicated handle; it's not necessary to call
10230 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
10231 ///
10232 /// If a calling token may not actually be a valid token at all due to a
10233 /// potentially hostile/untrusted provider of the token, call
10234 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
10235 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
10236 /// never responds due to a calling token not being a real token (not really
10237 /// talking to sysmem). Another option is to call
10238 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
10239 /// which also validates the token along with converting it to a
10240 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
10241 ///
10242 /// All table fields are currently required.
10243 ///
10244 /// - response `is_alternate`
10245 /// - true: The first parent node in common between the calling node and
10246 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
10247 /// that the calling `Node` and the `node_ref` `Node` will not have both
10248 /// their constraints apply - rather sysmem will choose one or the other
10249 /// of the constraints - never both. This is because only one child of
10250 /// a `BufferCollectionTokenGroup` is selected during logical
10251 /// allocation, with only that one child's subtree contributing to
10252 /// constraints aggregation.
10253 /// - false: The first parent node in common between the calling `Node`
10254 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
10255 /// Currently, this means the first parent node in common is a
10256 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
10257 /// `Release`ed). This means that the calling `Node` and the `node_ref`
10258 /// `Node` may have both their constraints apply during constraints
10259 /// aggregation of the logical allocation, if both `Node`(s) are
10260 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
10261 /// this case, there is no `BufferCollectionTokenGroup` that will
10262 /// directly prevent the two `Node`(s) from both being selected and
10263 /// their constraints both aggregated, but even when false, one or both
10264 /// `Node`(s) may still be eliminated from consideration if one or both
10265 /// `Node`(s) has a direct or indirect parent
10266 /// `BufferCollectionTokenGroup` which selects a child subtree other
10267 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
10268 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
10269 /// associated with the same buffer collection as the calling `Node`.
10270 /// Another reason for this error is if the `node_ref` is an
10271 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
10272 /// a real `node_ref` obtained from `GetNodeRef`.
10273 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
10275 /// the needed rights expected on a real `node_ref`.
10276 /// * No other failing status codes are returned by this call. However,
10277 /// sysmem may add additional codes in future, so the client should have
10278 /// sensible default handling for any failing status code.
10279 pub fn r#is_alternate_for(
10280 &self,
10281 mut payload: NodeIsAlternateForRequest,
10282 ) -> fidl::client::QueryResponseFut<
10283 NodeIsAlternateForResult,
10284 fidl::encoding::DefaultFuchsiaResourceDialect,
10285 > {
10286 BufferCollectionTokenGroupProxyInterface::r#is_alternate_for(self, payload)
10287 }
10288
10289 /// Get the buffer collection ID. This ID is also available from
10290 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
10291 /// within the collection).
10292 ///
10293 /// This call is mainly useful in situations where we can't convey a
10294 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
10295 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
10296 /// handle, which can be joined back up with a `BufferCollection` client end
10297 /// that was created via a different path. Prefer to convey a
10298 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
10299 ///
10300 /// Trusting a `buffer_collection_id` value from a source other than sysmem
10301 /// is analogous to trusting a koid value from a source other than zircon.
10302 /// Both should be avoided unless really necessary, and both require
10303 /// caution. In some situations it may be reasonable to refer to a
10304 /// pre-established `BufferCollection` by `buffer_collection_id` via a
10305 /// protocol for efficiency reasons, but an incoming value purporting to be
10306 /// a `buffer_collection_id` is not sufficient alone to justify granting the
10307 /// sender of the `buffer_collection_id` any capability. The sender must
10308 /// first prove to a receiver that the sender has/had a VMO or has/had a
10309 /// `BufferCollectionToken` to the same collection by sending a handle that
10310 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
10311 /// `buffer_collection_id` value. The receiver should take care to avoid
10312 /// assuming that a sender had a `BufferCollectionToken` in cases where the
10313 /// sender has only proven that the sender had a VMO.
10314 ///
10315 /// - response `buffer_collection_id` This ID is unique per buffer
10316 /// collection per boot. Each buffer is uniquely identified by the
10317 /// `buffer_collection_id` and `buffer_index` together.
10318 pub fn r#get_buffer_collection_id(
10319 &self,
10320 ) -> fidl::client::QueryResponseFut<
10321 NodeGetBufferCollectionIdResponse,
10322 fidl::encoding::DefaultFuchsiaResourceDialect,
10323 > {
10324 BufferCollectionTokenGroupProxyInterface::r#get_buffer_collection_id(self)
10325 }
10326
10327 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
10328 /// created after this message to weak, which means that a client's `Node`
10329 /// client end (or a child created after this message) is not alone
10330 /// sufficient to keep allocated VMOs alive.
10331 ///
10332 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
10333 /// `close_weak_asap`.
10334 ///
10335 /// This message is only permitted before the `Node` becomes ready for
10336 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
10337 /// * `BufferCollectionToken`: any time
10338 /// * `BufferCollection`: before `SetConstraints`
10339 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
10340 ///
10341 /// Currently, no conversion from strong `Node` to weak `Node` after ready
10342 /// for allocation is provided, but a client can simulate that by creating
10343 /// an additional `Node` before allocation and setting that additional
10344 /// `Node` to weak, and then potentially at some point later sending
10345 /// `Release` and closing the client end of the client's strong `Node`, but
10346 /// keeping the client's weak `Node`.
10347 ///
10348 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
10349 /// collection failure (all `Node` client end(s) will see
10350 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
10351 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
10352 /// this situation until all `Node`(s) are ready for allocation. For initial
10353 /// allocation to succeed, at least one strong `Node` is required to exist
10354 /// at allocation time, but after that client receives VMO handles, that
10355 /// client can `BufferCollection.Release` and close the client end without
10356 /// causing this type of failure.
10357 ///
10358 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
10359 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
10360 /// separately as appropriate.
10361 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
10362 BufferCollectionTokenGroupProxyInterface::r#set_weak(self)
10363 }
10364
10365 /// This indicates to sysmem that the client is prepared to pay attention to
10366 /// `close_weak_asap`.
10367 ///
10368 /// If sent, this message must be before
10369 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
10370 ///
10371 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
10372 /// send this message before `WaitForAllBuffersAllocated`, or a parent
10373 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
10374 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
10375 /// trigger buffer collection failure.
10376 ///
10377 /// This message is necessary because weak sysmem VMOs have not always been
10378 /// a thing, so older clients are not aware of the need to pay attention to
10379 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
10380 /// sysmem weak VMO handles asap. By having this message and requiring
10381 /// participants to indicate their acceptance of this aspect of the overall
10382 /// protocol, we avoid situations where an older client is delivered a weak
10383 /// VMO without any way for sysmem to get that VMO to close quickly later
10384 /// (and on a per-buffer basis).
10385 ///
10386 /// A participant that doesn't handle `close_weak_asap` and also doesn't
10387 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
10388 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
10389 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
10390 /// same participant has a child/delegate which does retrieve VMOs, that
10391 /// child/delegate will need to send `SetWeakOk` before
10392 /// `WaitForAllBuffersAllocated`.
10393 ///
10394 /// + request `for_child_nodes_also` If present and true, this means direct
10395 /// child nodes of this node created after this message plus all
10396 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
10397 /// those nodes. Any child node of this node that was created before this
10398 /// message is not included. This setting is "sticky" in the sense that a
10399 /// subsequent `SetWeakOk` without this bool set to true does not reset
10400 /// the server-side bool. If this creates a problem for a participant, a
10401 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
10402 /// tokens instead, as appropriate. A participant should only set
10403 /// `for_child_nodes_also` true if the participant can really promise to
10404 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
10405 /// weak VMO handles held by participants holding the corresponding child
    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
    ///   which are using sysmem(1) can be weak, despite the clients of those
    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
    ///   direct way to find out about `close_weak_asap`. This only applies to
    ///   descendants of this `Node` which are using sysmem(1), not to this
10411 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
10412 /// token, which will fail allocation unless an ancestor of this `Node`
10413 /// specified `for_child_nodes_also` true.
10414 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
10415 BufferCollectionTokenGroupProxyInterface::r#set_weak_ok(self, payload)
10416 }
10417
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
10420 /// reservation by a different `Node` via
10421 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
10422 ///
10423 /// The `Node` buffer counts may not be released until the entire tree of
10424 /// `Node`(s) is closed or failed, because
10425 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
10426 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
10427 /// `Node` buffer counts remain reserved until the orphaned node is later
10428 /// cleaned up.
10429 ///
10430 /// If the `Node` exceeds a fairly large number of attached eventpair server
10431 /// ends, a log message will indicate this and the `Node` (and the
10432 /// appropriate) sub-tree will fail.
10433 ///
10434 /// The `server_end` will remain open when
10435 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
10436 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
10437 /// [`fuchsia.sysmem2/BufferCollection`].
10438 ///
10439 /// This message can also be used with a
10440 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
10441 pub fn r#attach_node_tracking(
10442 &self,
10443 mut payload: NodeAttachNodeTrackingRequest,
10444 ) -> Result<(), fidl::Error> {
10445 BufferCollectionTokenGroupProxyInterface::r#attach_node_tracking(self, payload)
10446 }
10447
10448 /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
10449 /// (including its children) will be selected during allocation (or logical
10450 /// allocation).
10451 ///
10452 /// Before passing the client end of this token to
10453 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
10454 /// [`fuchsia.sysmem2/Node.Sync`] after
10455 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
10456 /// Or the client can use
10457 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
10458 /// essentially includes the `Sync`.
10459 ///
10460 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
10461 /// fail the group's subtree and close the connection.
10462 ///
10463 /// After all children have been created, send AllChildrenPresent.
10464 ///
10465 /// + request `token_request` The server end of the new token channel.
10466 /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
10467 /// token allows the holder to get the same rights to buffers as the
10468 /// parent token (of the group) had. When the value isn't
    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
    ///   bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
10471 /// for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
10472 /// causes subtree failure.
10473 pub fn r#create_child(
10474 &self,
10475 mut payload: BufferCollectionTokenGroupCreateChildRequest,
10476 ) -> Result<(), fidl::Error> {
10477 BufferCollectionTokenGroupProxyInterface::r#create_child(self, payload)
10478 }
10479
10480 /// Create 1 or more child tokens at once, synchronously. In contrast to
10481 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
10482 /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
10483 /// of a returned token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
10485 ///
10486 /// The lower-index child tokens are higher priority (attempted sooner) than
10487 /// higher-index child tokens.
10488 ///
10489 /// As per all child tokens, successful aggregation will choose exactly one
10490 /// child among all created children (across all children created across
10491 /// potentially multiple calls to
10492 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
10493 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
10494 ///
10495 /// The maximum permissible total number of children per group, and total
10496 /// number of nodes in an overall tree (from the root) are capped to limits
10497 /// which are not configurable via these protocols.
10498 ///
10499 /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
10500 /// this will fail the group's subtree and close the connection.
10501 ///
10502 /// After all children have been created, send AllChildrenPresent.
10503 ///
    /// + request `rights_attenuation_masks` The size of the
    ///   `rights_attenuation_masks` determines the number of created child
10506 /// tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
10507 /// The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
10508 /// other value, each 0 bit in the mask attenuates that right.
10509 /// - response `tokens` The created child tokens.
10510 pub fn r#create_children_sync(
10511 &self,
10512 mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
10513 ) -> fidl::client::QueryResponseFut<
10514 BufferCollectionTokenGroupCreateChildrenSyncResponse,
10515 fidl::encoding::DefaultFuchsiaResourceDialect,
10516 > {
10517 BufferCollectionTokenGroupProxyInterface::r#create_children_sync(self, payload)
10518 }
10519
10520 /// Indicate that no more children will be created.
10521 ///
10522 /// After creating all children, the client should send
10523 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
10524 /// inform sysmem that no more children will be created, so that sysmem can
10525 /// know when it's ok to start aggregating constraints.
10526 ///
10527 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
10528 /// fail the group's subtree and close the connection.
10529 ///
10530 /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
10531 /// after `AllChildrenPresent`, else failure of the group's subtree will be
10532 /// triggered. This is intentionally not analogous to how `Release` without
10533 /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
10534 /// subtree failure.
10535 pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
10536 BufferCollectionTokenGroupProxyInterface::r#all_children_present(self)
10537 }
10538}
10539
10540impl BufferCollectionTokenGroupProxyInterface for BufferCollectionTokenGroupProxy {
10541 type SyncResponseFut =
10542 fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
10543 fn r#sync(&self) -> Self::SyncResponseFut {
10544 fn _decode(
10545 mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
10546 ) -> Result<(), fidl::Error> {
10547 let _response = fidl::client::decode_transaction_body::<
10548 fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
10549 fidl::encoding::DefaultFuchsiaResourceDialect,
10550 0x11ac2555cf575b54,
10551 >(_buf?)?
10552 .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
10553 Ok(_response)
10554 }
10555 self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
10556 (),
10557 0x11ac2555cf575b54,
10558 fidl::encoding::DynamicFlags::FLEXIBLE,
10559 _decode,
10560 )
10561 }
10562
10563 fn r#release(&self) -> Result<(), fidl::Error> {
10564 self.client.send::<fidl::encoding::EmptyPayload>(
10565 (),
10566 0x6a5cae7d6d6e04c6,
10567 fidl::encoding::DynamicFlags::FLEXIBLE,
10568 )
10569 }
10570
10571 fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
10572 self.client.send::<NodeSetNameRequest>(
10573 payload,
10574 0xb41f1624f48c1e9,
10575 fidl::encoding::DynamicFlags::FLEXIBLE,
10576 )
10577 }
10578
10579 fn r#set_debug_client_info(
10580 &self,
10581 mut payload: &NodeSetDebugClientInfoRequest,
10582 ) -> Result<(), fidl::Error> {
10583 self.client.send::<NodeSetDebugClientInfoRequest>(
10584 payload,
10585 0x5cde8914608d99b1,
10586 fidl::encoding::DynamicFlags::FLEXIBLE,
10587 )
10588 }
10589
10590 fn r#set_debug_timeout_log_deadline(
10591 &self,
10592 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
10593 ) -> Result<(), fidl::Error> {
10594 self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
10595 payload,
10596 0x716b0af13d5c0806,
10597 fidl::encoding::DynamicFlags::FLEXIBLE,
10598 )
10599 }
10600
10601 fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
10602 self.client.send::<fidl::encoding::EmptyPayload>(
10603 (),
10604 0x5209c77415b4dfad,
10605 fidl::encoding::DynamicFlags::FLEXIBLE,
10606 )
10607 }
10608
10609 type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
10610 NodeGetNodeRefResponse,
10611 fidl::encoding::DefaultFuchsiaResourceDialect,
10612 >;
10613 fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
10614 fn _decode(
10615 mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
10616 ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
10617 let _response = fidl::client::decode_transaction_body::<
10618 fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
10619 fidl::encoding::DefaultFuchsiaResourceDialect,
10620 0x5b3d0e51614df053,
10621 >(_buf?)?
10622 .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
10623 Ok(_response)
10624 }
10625 self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
10626 (),
10627 0x5b3d0e51614df053,
10628 fidl::encoding::DynamicFlags::FLEXIBLE,
10629 _decode,
10630 )
10631 }
10632
10633 type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
10634 NodeIsAlternateForResult,
10635 fidl::encoding::DefaultFuchsiaResourceDialect,
10636 >;
10637 fn r#is_alternate_for(
10638 &self,
10639 mut payload: NodeIsAlternateForRequest,
10640 ) -> Self::IsAlternateForResponseFut {
10641 fn _decode(
10642 mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
10643 ) -> Result<NodeIsAlternateForResult, fidl::Error> {
10644 let _response = fidl::client::decode_transaction_body::<
10645 fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
10646 fidl::encoding::DefaultFuchsiaResourceDialect,
10647 0x3a58e00157e0825,
10648 >(_buf?)?
10649 .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
10650 Ok(_response.map(|x| x))
10651 }
10652 self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
10653 &mut payload,
10654 0x3a58e00157e0825,
10655 fidl::encoding::DynamicFlags::FLEXIBLE,
10656 _decode,
10657 )
10658 }
10659
10660 type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
10661 NodeGetBufferCollectionIdResponse,
10662 fidl::encoding::DefaultFuchsiaResourceDialect,
10663 >;
10664 fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
10665 fn _decode(
10666 mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
10667 ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
10668 let _response = fidl::client::decode_transaction_body::<
10669 fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
10670 fidl::encoding::DefaultFuchsiaResourceDialect,
10671 0x77d19a494b78ba8c,
10672 >(_buf?)?
10673 .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
10674 Ok(_response)
10675 }
10676 self.client.send_query_and_decode::<
10677 fidl::encoding::EmptyPayload,
10678 NodeGetBufferCollectionIdResponse,
10679 >(
10680 (),
10681 0x77d19a494b78ba8c,
10682 fidl::encoding::DynamicFlags::FLEXIBLE,
10683 _decode,
10684 )
10685 }
10686
10687 fn r#set_weak(&self) -> Result<(), fidl::Error> {
10688 self.client.send::<fidl::encoding::EmptyPayload>(
10689 (),
10690 0x22dd3ea514eeffe1,
10691 fidl::encoding::DynamicFlags::FLEXIBLE,
10692 )
10693 }
10694
10695 fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
10696 self.client.send::<NodeSetWeakOkRequest>(
10697 &mut payload,
10698 0x38a44fc4d7724be9,
10699 fidl::encoding::DynamicFlags::FLEXIBLE,
10700 )
10701 }
10702
10703 fn r#attach_node_tracking(
10704 &self,
10705 mut payload: NodeAttachNodeTrackingRequest,
10706 ) -> Result<(), fidl::Error> {
10707 self.client.send::<NodeAttachNodeTrackingRequest>(
10708 &mut payload,
10709 0x3f22f2a293d3cdac,
10710 fidl::encoding::DynamicFlags::FLEXIBLE,
10711 )
10712 }
10713
10714 fn r#create_child(
10715 &self,
10716 mut payload: BufferCollectionTokenGroupCreateChildRequest,
10717 ) -> Result<(), fidl::Error> {
10718 self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
10719 &mut payload,
10720 0x41a0075d419f30c5,
10721 fidl::encoding::DynamicFlags::FLEXIBLE,
10722 )
10723 }
10724
10725 type CreateChildrenSyncResponseFut = fidl::client::QueryResponseFut<
10726 BufferCollectionTokenGroupCreateChildrenSyncResponse,
10727 fidl::encoding::DefaultFuchsiaResourceDialect,
10728 >;
10729 fn r#create_children_sync(
10730 &self,
10731 mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
10732 ) -> Self::CreateChildrenSyncResponseFut {
10733 fn _decode(
10734 mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
10735 ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
10736 let _response = fidl::client::decode_transaction_body::<
10737 fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
10738 fidl::encoding::DefaultFuchsiaResourceDialect,
10739 0x15dea448c536070a,
10740 >(_buf?)?
10741 .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
10742 Ok(_response)
10743 }
10744 self.client.send_query_and_decode::<
10745 BufferCollectionTokenGroupCreateChildrenSyncRequest,
10746 BufferCollectionTokenGroupCreateChildrenSyncResponse,
10747 >(
10748 payload,
10749 0x15dea448c536070a,
10750 fidl::encoding::DynamicFlags::FLEXIBLE,
10751 _decode,
10752 )
10753 }
10754
10755 fn r#all_children_present(&self) -> Result<(), fidl::Error> {
10756 self.client.send::<fidl::encoding::EmptyPayload>(
10757 (),
10758 0x5c327e4a23391312,
10759 fidl::encoding::DynamicFlags::FLEXIBLE,
10760 )
10761 }
10762}
10763
/// A Stream of incoming events for fuchsia.sysmem2/BufferCollectionTokenGroup.
pub struct BufferCollectionTokenGroupEventStream {
    // Receives raw event message buffers from the underlying channel; each is
    // decoded into a `BufferCollectionTokenGroupEvent` by `poll_next`.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
10767
10768impl std::marker::Unpin for BufferCollectionTokenGroupEventStream {}
10769
impl futures::stream::FusedStream for BufferCollectionTokenGroupEventStream {
    // Termination state is delegated to the underlying event receiver.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
10775
10776impl futures::Stream for BufferCollectionTokenGroupEventStream {
10777 type Item = Result<BufferCollectionTokenGroupEvent, fidl::Error>;
10778
10779 fn poll_next(
10780 mut self: std::pin::Pin<&mut Self>,
10781 cx: &mut std::task::Context<'_>,
10782 ) -> std::task::Poll<Option<Self::Item>> {
10783 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
10784 &mut self.event_receiver,
10785 cx
10786 )?) {
10787 Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenGroupEvent::decode(buf))),
10788 None => std::task::Poll::Ready(None),
10789 }
10790 }
10791}
10792
/// Events for fuchsia.sysmem2/BufferCollectionTokenGroup. No event ordinals
/// are recognized by these bindings, so only the unknown-event case exists.
#[derive(Debug)]
pub enum BufferCollectionTokenGroupEvent {
    /// An event whose ordinal these bindings don't recognize; surfaced only
    /// when the sender marked it as flexible (see `decode` below).
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
10801
10802impl BufferCollectionTokenGroupEvent {
10803 /// Decodes a message buffer as a [`BufferCollectionTokenGroupEvent`].
10804 fn decode(
10805 mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
10806 ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
10807 let (bytes, _handles) = buf.split_mut();
10808 let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
10809 debug_assert_eq!(tx_header.tx_id, 0);
10810 match tx_header.ordinal {
10811 _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
10812 Ok(BufferCollectionTokenGroupEvent::_UnknownEvent {
10813 ordinal: tx_header.ordinal,
10814 })
10815 }
10816 _ => Err(fidl::Error::UnknownOrdinal {
10817 ordinal: tx_header.ordinal,
10818 protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
10819 })
10820 }
10821 }
10822}
10823
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionTokenGroup.
pub struct BufferCollectionTokenGroupRequestStream {
    // Shared server-side state (channel plus shutdown tracking) for this
    // connection; also cloned into every control handle and responder.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has yielded `None`; polling afterwards panics.
    is_terminated: bool,
}
10829
10830impl std::marker::Unpin for BufferCollectionTokenGroupRequestStream {}
10831
impl futures::stream::FusedStream for BufferCollectionTokenGroupRequestStream {
    // Reflects the flag set by `poll_next` when the channel shuts down or the
    // peer closes.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
10837
10838impl fidl::endpoints::RequestStream for BufferCollectionTokenGroupRequestStream {
10839 type Protocol = BufferCollectionTokenGroupMarker;
10840 type ControlHandle = BufferCollectionTokenGroupControlHandle;
10841
10842 fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
10843 Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
10844 }
10845
10846 fn control_handle(&self) -> Self::ControlHandle {
10847 BufferCollectionTokenGroupControlHandle { inner: self.inner.clone() }
10848 }
10849
10850 fn into_inner(
10851 self,
10852 ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
10853 {
10854 (self.inner, self.is_terminated)
10855 }
10856
10857 fn from_inner(
10858 inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
10859 is_terminated: bool,
10860 ) -> Self {
10861 Self { inner, is_terminated }
10862 }
10863}
10864
impl futures::Stream for BufferCollectionTokenGroupRequestStream {
    type Item = Result<BufferCollectionTokenGroupRequest, fidl::Error>;

    // Reads one message from the channel, decodes its header, and dispatches
    // on the method ordinal to build the matching `BufferCollectionTokenGroupRequest`
    // variant. Two-way methods get a responder (which carries the tx_id);
    // one-way methods get only a control handle. Unknown flexible ordinals are
    // surfaced as `_UnknownMethod`; unknown strict ordinals are errors.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled BufferCollectionTokenGroupRequestStream after completion");
        }
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                // Read one message; PEER_CLOSED terminates the stream cleanly,
                // any other error is surfaced to the caller.
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Sync (two-way, empty payload)
                    0x11ac2555cf575b54 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::Sync {
                            responder: BufferCollectionTokenGroupSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Release (one-way, empty payload)
                    0x6a5cae7d6d6e04c6 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::Release {
                            control_handle,
                        })
                    }
                    // SetName (one-way)
                    0xb41f1624f48c1e9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetName {payload: req,
                            control_handle,
                        })
                    }
                    // SetDebugClientInfo (one-way)
                    0x5cde8914608d99b1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetDebugClientInfo {payload: req,
                            control_handle,
                        })
                    }
                    // SetDebugTimeoutLogDeadline (one-way)
                    0x716b0af13d5c0806 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {payload: req,
                            control_handle,
                        })
                    }
                    // SetVerboseLogging (one-way, empty payload)
                    0x5209c77415b4dfad => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetVerboseLogging {
                            control_handle,
                        })
                    }
                    // GetNodeRef (two-way, empty payload)
                    0x5b3d0e51614df053 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::GetNodeRef {
                            responder: BufferCollectionTokenGroupGetNodeRefResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // IsAlternateFor (two-way)
                    0x3a58e00157e0825 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::IsAlternateFor {payload: req,
                            responder: BufferCollectionTokenGroupIsAlternateForResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetBufferCollectionId (two-way, empty payload)
                    0x77d19a494b78ba8c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::GetBufferCollectionId {
                            responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SetWeak (one-way, empty payload)
                    0x22dd3ea514eeffe1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetWeak {
                            control_handle,
                        })
                    }
                    // SetWeakOk (one-way)
                    0x38a44fc4d7724be9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetWeakOk {payload: req,
                            control_handle,
                        })
                    }
                    // AttachNodeTracking (one-way)
                    0x3f22f2a293d3cdac => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::AttachNodeTracking {payload: req,
                            control_handle,
                        })
                    }
                    // CreateChild (one-way)
                    0x41a0075d419f30c5 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::CreateChild {payload: req,
                            control_handle,
                        })
                    }
                    // CreateChildrenSync (two-way)
                    0x15dea448c536070a => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildrenSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildrenSyncRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::CreateChildrenSync {payload: req,
                            responder: BufferCollectionTokenGroupCreateChildrenSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // AllChildrenPresent (one-way, empty payload)
                    0x5c327e4a23391312 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::AllChildrenPresent {
                            control_handle,
                        })
                    }
                    // Unknown flexible one-way method: surface to the server impl.
                    _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                        Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with a framework
                    // error so the client isn't left hanging, then surface it.
                    _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
11110
11111/// The sysmem implementation is consistent with a logical / conceptual model of
11112/// allocation / logical allocation as follows:
11113///
11114/// As usual, a logical allocation considers either the root and all nodes with
11115/// connectivity to the root that don't transit a [`fuchsia.sysmem2/Node`]
11116/// created with [`fuchsia.sysmem2/BufferCollection.AttachToken`], or a subtree
11117/// rooted at an `AttachToken` `Node` and all `Node`(s) with connectivity to
11118/// that subtree that don't transit another `AttachToken`. This is called the
11119/// logical allocation pruned subtree, or pruned subtree for short.
11120///
11121/// During constraints aggregation, each
11122/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] will select a single child
11123/// `Node` among its direct children. The rest of the children will appear to
11124/// fail the logical allocation, while the selected child may succeed.
11125///
11126/// When more than one `BufferCollectionTokenGroup` exists in the overall
11127/// logical allocation pruned subtree, the relative priority between two groups
11128/// is equivalent to their ordering in a DFS pre-order iteration of the tree,
11129/// with parents higher priority than children, and left children higher
11130/// priority than right children.
11131///
11132/// When a particular child of a group is selected (whether provisionally during
11133/// a constraints aggregation attempt, or as a final selection), the
11134/// non-selection of other children of the group will "hide" any other groups
11135/// under those non-selected children.
11136///
11137/// Within a logical allocation, aggregation is attempted first by provisionally
11138/// selecting child 0 of the highest-priority group, and child 0 of the next
11139/// highest-priority group that isn't hidden by the provisional selections so
11140/// far, etc.
11141///
11142/// If that aggregation attempt fails, aggregation will be attempted with the
11143/// ordinal 0 child of all the same groups except the lowest priority non-hidden
11144/// group which will provisionally select its ordinal 1 child (and then child 2
11145/// and so on). If a new lowest-priority group is un-hidden as provisional
11146/// selections are updated, that newly un-hidden lowest-priority group has all
11147/// its children considered in order, before changing the provisional selection
11148/// in the former lowest-priority group. In terms of result, this is equivalent
11149/// to systematic enumeration of all possible combinations of choices in a
11150/// counting-like order updating the lowest-priority group the most often and
11151/// the highest-priority group the least often. Rather than actually attempting
11152/// aggregation with all the combinations, we can skip over combinations which
11153/// are redundant/equivalent due to hiding without any change to the result.
11154///
11155/// Attempted constraint aggregations of enumerated non-equivalent combinations
11156/// of choices continue in this manner until either (a) all aggregation attempts
11157/// fail in which case the overall logical allocation fails, or (b) until an
11158/// attempted aggregation succeeds, in which case buffer allocation (if needed;
11159/// if this is the pruned subtree rooted at the overall root `Node`) is
11160/// attempted once. If buffer allocation based on the first successful
11161/// constraints aggregation fails, the overall logical allocation fails (there
11162/// is no buffer allocation retry / re-attempt). If buffer allocation succeeds
11163/// (or is not needed due to being a pruned subtree that doesn't include the
11164/// root), the logical allocation succeeds.
11165///
11166/// If this prioritization scheme cannot reasonably work for your usage of
11167/// sysmem, please don't hesitate to contact sysmem folks to discuss potentially
11168/// adding a way to achieve what you need.
11169///
11170/// Please avoid creating a large number of `BufferCollectionTokenGroup`(s) per
11171/// logical allocation, especially with large number of children overall, and
11172/// especially in cases where aggregation may reasonably be expected to often
11173/// fail using ordinal 0 children and possibly with later children as well.
11174/// Sysmem mitigates potentially high time complexity of evaluating too many
11175/// child combinations/selections across too many groups by simply failing
11176/// logical allocation beyond a certain (fairly high, but not huge) max number
11177/// of considered group child combinations/selections. More advanced (and more
11178/// complicated) mitigation is not anticipated to be practically necessary or
11179/// worth the added complexity. Please contact sysmem folks if the max limit is
11180/// getting hit or if you anticipate it getting hit, to discuss potential
11181/// options.
11182///
11183/// Prefer to use multiple [`fuchsia.sysmem2/ImageFormatConstraints`] in a
11184/// single [`fuchsia.sysmem2/BufferCollectionConstraints`] when feasible (when a
11185/// participant just needs to express the ability to work with more than a
11186/// single [`fuchsia.images2/PixelFormat`], with sysmem choosing which
11187/// `PixelFormat` to use among those supported by all participants).
11188///
11189/// Similar to [`fuchsia.sysmem2/BufferCollectionToken`] and
11190/// [`fuchsia.sysmem2/BufferCollection`], closure of the
11191/// `BufferCollectionTokenGroup` channel without sending
11192/// [`fuchsia.sysmem2/Node.Release`] first will cause buffer collection failure
11193/// (or subtree failure if using
11194/// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11195/// [`fuchsia.sysmem2/BufferCollection.AttachToken`] and the
11196/// `BufferCollectionTokenGroup` is part of a subtree under such a node that
11197/// doesn't propagate failure to its parent).
11198///
11199/// Epitaphs are not used in this protocol.
11200#[derive(Debug)]
11201pub enum BufferCollectionTokenGroupRequest {
11202 /// Ensure that previous messages have been received server side. This is
11203 /// particularly useful after previous messages that created new tokens,
11204 /// because a token must be known to the sysmem server before sending the
11205 /// token to another participant.
11206 ///
11207 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
11208 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
11209 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
11210 /// to mitigate the possibility of a hostile/fake
11211 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
11212 /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
11214 /// the token as part of exchanging it for a
11215 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
11216 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
11217 /// of stalling.
11218 ///
11219 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
11220 /// and then starting and completing a `Sync`, it's then safe to send the
11221 /// `BufferCollectionToken` client ends to other participants knowing the
11222 /// server will recognize the tokens when they're sent by the other
11223 /// participants to sysmem in a
11224 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
11225 /// efficient way to create tokens while avoiding unnecessary round trips.
11226 ///
11227 /// Other options include waiting for each
11228 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
11229 /// individually (using separate call to `Sync` after each), or calling
11230 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
11231 /// converted to a `BufferCollection` via
11232 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
11233 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
11234 /// the sync step and can create multiple tokens at once.
11235 Sync { responder: BufferCollectionTokenGroupSyncResponder },
11236 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
11237 ///
11238 /// Normally a participant will convert a `BufferCollectionToken` into a
11239 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
11240 /// `Release` via the token (and then close the channel immediately or
11241 /// shortly later in response to server closing the server end), which
11242 /// avoids causing buffer collection failure. Without a prior `Release`,
11243 /// closing the `BufferCollectionToken` client end will cause buffer
11244 /// collection failure.
11245 ///
11246 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
11247 ///
11248 /// By default the server handles unexpected closure of a
11249 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
11250 /// first) by failing the buffer collection. Partly this is to expedite
11251 /// closing VMO handles to reclaim memory when any participant fails. If a
11252 /// participant would like to cleanly close a `BufferCollection` without
11253 /// causing buffer collection failure, the participant can send `Release`
11254 /// before closing the `BufferCollection` client end. The `Release` can
11255 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
11256 /// buffer collection won't require constraints from this node in order to
11257 /// allocate. If after `SetConstraints`, the constraints are retained and
11258 /// aggregated, despite the lack of `BufferCollection` connection at the
11259 /// time of constraints aggregation.
11260 ///
11261 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
11262 ///
11263 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
11264 /// end (without `Release` first) will trigger failure of the buffer
11265 /// collection. To close a `BufferCollectionTokenGroup` channel without
11266 /// failing the buffer collection, ensure that AllChildrenPresent() has been
11267 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
11268 /// client end.
11269 ///
11270 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
11272 /// buffer collection will fail (triggered by reception of `Release` without
11273 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
11274 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
11275 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
11276 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
11277 /// close requires `AllChildrenPresent` (if not already sent), then
11278 /// `Release`, then close client end.
11279 ///
11280 /// If `Release` occurs after `AllChildrenPresent`, the children and all
11281 /// their constraints remain intact (just as they would if the
11282 /// `BufferCollectionTokenGroup` channel had remained open), and the client
11283 /// end close doesn't trigger buffer collection failure.
11284 ///
11285 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
11286 ///
11287 /// For brevity, the per-channel-protocol paragraphs above ignore the
11288 /// separate failure domain created by
11289 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11290 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
11291 /// unexpectedly closes (without `Release` first) and that client end is
11292 /// under a failure domain, instead of failing the whole buffer collection,
11293 /// the failure domain is failed, but the buffer collection itself is
11294 /// isolated from failure of the failure domain. Such failure domains can be
11295 /// nested, in which case only the inner-most failure domain in which the
11296 /// `Node` resides fails.
11297 Release { control_handle: BufferCollectionTokenGroupControlHandle },
11298 /// Set a name for VMOs in this buffer collection.
11299 ///
11300 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
11301 /// will be truncated to fit. The name of the vmo will be suffixed with the
11302 /// buffer index within the collection (if the suffix fits within
11303 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
11304 /// listed in the inspect data.
11305 ///
11306 /// The name only affects VMOs allocated after the name is set; this call
11307 /// does not rename existing VMOs. If multiple clients set different names
11308 /// then the larger priority value will win. Setting a new name with the
11309 /// same priority as a prior name doesn't change the name.
11310 ///
11311 /// All table fields are currently required.
11312 ///
11313 /// + request `priority` The name is only set if this is the first `SetName`
11314 /// or if `priority` is greater than any previous `priority` value in
11315 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
11316 /// + request `name` The name for VMOs created under this buffer collection.
11317 SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenGroupControlHandle },
11318 /// Set information about the current client that can be used by sysmem to
11319 /// help diagnose leaking memory and allocation stalls waiting for a
11320 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
11321 ///
11322 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
11323 /// `Node`(s) derived from this `Node`, unless overriden by
11324 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
11325 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
11326 ///
11327 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
11328 /// `Allocator` is the most efficient way to ensure that all
11329 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
11330 /// set, and is also more efficient than separately sending the same debug
11331 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
11332 /// created [`fuchsia.sysmem2/Node`].
11333 ///
11334 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
11335 /// indicate which client is closing their channel first, leading to subtree
11336 /// failure (which can be normal if the purpose of the subtree is over, but
11337 /// if happening earlier than expected, the client-channel-specific name can
11338 /// help diagnose where the failure is first coming from, from sysmem's
11339 /// point of view).
11340 ///
11341 /// All table fields are currently required.
11342 ///
11343 /// + request `name` This can be an arbitrary string, but the current
11344 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
11345 /// + request `id` This can be an arbitrary id, but the current process ID
11346 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
11347 SetDebugClientInfo {
11348 payload: NodeSetDebugClientInfoRequest,
11349 control_handle: BufferCollectionTokenGroupControlHandle,
11350 },
11351 /// Sysmem logs a warning if sysmem hasn't seen
11352 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
11353 /// within 5 seconds after creation of a new collection.
11354 ///
11355 /// Clients can call this method to change when the log is printed. If
11356 /// multiple client set the deadline, it's unspecified which deadline will
11357 /// take effect.
11358 ///
11359 /// In most cases the default works well.
11360 ///
11361 /// All table fields are currently required.
11362 ///
11363 /// + request `deadline` The time at which sysmem will start trying to log
11364 /// the warning, unless all constraints are with sysmem by then.
11365 SetDebugTimeoutLogDeadline {
11366 payload: NodeSetDebugTimeoutLogDeadlineRequest,
11367 control_handle: BufferCollectionTokenGroupControlHandle,
11368 },
11369 /// This enables verbose logging for the buffer collection.
11370 ///
11371 /// Verbose logging includes constraints set via
11372 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
11373 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
11374 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
11375 /// the tree of `Node`(s).
11376 ///
11377 /// Normally sysmem prints only a single line complaint when aggregation
11378 /// fails, with just the specific detailed reason that aggregation failed,
11379 /// with little surrounding context. While this is often enough to diagnose
11380 /// a problem if only a small change was made and everything was working
11381 /// before the small change, it's often not particularly helpful for getting
11382 /// a new buffer collection to work for the first time. Especially with
11383 /// more complex trees of nodes, involving things like
11384 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
11385 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
11386 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
11387 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
11388 /// looks like and why it's failing a logical allocation, or why a tree or
11389 /// subtree is failing sooner than expected.
11390 ///
11391 /// The intent of the extra logging is to be acceptable from a performance
11392 /// point of view, under the assumption that verbose logging is only enabled
11393 /// on a low number of buffer collections. If we're not tracking down a bug,
11394 /// we shouldn't send this message.
11395 SetVerboseLogging { control_handle: BufferCollectionTokenGroupControlHandle },
11396 /// This gets a handle that can be used as a parameter to
11397 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
11398 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
11399 /// client obtained this handle from this `Node`.
11400 ///
11401 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
11402 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
11403 /// despite the two calls typically being on different channels.
11404 ///
11405 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
11406 ///
11407 /// All table fields are currently required.
11408 ///
11409 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
11410 /// different `Node` channel, to prove that the client obtained the handle
11411 /// from this `Node`.
11412 GetNodeRef { responder: BufferCollectionTokenGroupGetNodeRefResponder },
11413 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
11414 /// rooted at a different child token of a common parent
11415 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
11416 /// passed-in `node_ref`.
11417 ///
11418 /// This call is for assisting with admission control de-duplication, and
11419 /// with debugging.
11420 ///
11421 /// The `node_ref` must be obtained using
11422 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
11423 ///
11424 /// The `node_ref` can be a duplicated handle; it's not necessary to call
11425 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
11426 ///
11427 /// If a calling token may not actually be a valid token at all due to a
11428 /// potentially hostile/untrusted provider of the token, call
11429 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
11430 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
11431 /// never responds due to a calling token not being a real token (not really
11432 /// talking to sysmem). Another option is to call
11433 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
11434 /// which also validates the token along with converting it to a
11435 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
11436 ///
11437 /// All table fields are currently required.
11438 ///
11439 /// - response `is_alternate`
11440 /// - true: The first parent node in common between the calling node and
11441 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
11442 /// that the calling `Node` and the `node_ref` `Node` will not have both
11443 /// their constraints apply - rather sysmem will choose one or the other
11444 /// of the constraints - never both. This is because only one child of
11445 /// a `BufferCollectionTokenGroup` is selected during logical
11446 /// allocation, with only that one child's subtree contributing to
11447 /// constraints aggregation.
11448 /// - false: The first parent node in common between the calling `Node`
11449 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
11450 /// Currently, this means the first parent node in common is a
11451 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
11452 /// `Release`ed). This means that the calling `Node` and the `node_ref`
11453 /// `Node` may have both their constraints apply during constraints
11454 /// aggregation of the logical allocation, if both `Node`(s) are
11455 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
11456 /// this case, there is no `BufferCollectionTokenGroup` that will
11457 /// directly prevent the two `Node`(s) from both being selected and
11458 /// their constraints both aggregated, but even when false, one or both
11459 /// `Node`(s) may still be eliminated from consideration if one or both
11460 /// `Node`(s) has a direct or indirect parent
11461 /// `BufferCollectionTokenGroup` which selects a child subtree other
11462 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
11463 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
11464 /// associated with the same buffer collection as the calling `Node`.
11465 /// Another reason for this error is if the `node_ref` is an
11466 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
11467 /// a real `node_ref` obtained from `GetNodeRef`.
11468 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
11469 /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle , or doesn't have
11470 /// the needed rights expected on a real `node_ref`.
11471 /// * No other failing status codes are returned by this call. However,
11472 /// sysmem may add additional codes in future, so the client should have
11473 /// sensible default handling for any failing status code.
11474 IsAlternateFor {
11475 payload: NodeIsAlternateForRequest,
11476 responder: BufferCollectionTokenGroupIsAlternateForResponder,
11477 },
11478 /// Get the buffer collection ID. This ID is also available from
11479 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
11480 /// within the collection).
11481 ///
11482 /// This call is mainly useful in situations where we can't convey a
11483 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
11484 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
11485 /// handle, which can be joined back up with a `BufferCollection` client end
11486 /// that was created via a different path. Prefer to convey a
11487 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
11488 ///
11489 /// Trusting a `buffer_collection_id` value from a source other than sysmem
11490 /// is analogous to trusting a koid value from a source other than zircon.
11491 /// Both should be avoided unless really necessary, and both require
11492 /// caution. In some situations it may be reasonable to refer to a
11493 /// pre-established `BufferCollection` by `buffer_collection_id` via a
11494 /// protocol for efficiency reasons, but an incoming value purporting to be
11495 /// a `buffer_collection_id` is not sufficient alone to justify granting the
11496 /// sender of the `buffer_collection_id` any capability. The sender must
11497 /// first prove to a receiver that the sender has/had a VMO or has/had a
11498 /// `BufferCollectionToken` to the same collection by sending a handle that
11499 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
11500 /// `buffer_collection_id` value. The receiver should take care to avoid
11501 /// assuming that a sender had a `BufferCollectionToken` in cases where the
11502 /// sender has only proven that the sender had a VMO.
11503 ///
11504 /// - response `buffer_collection_id` This ID is unique per buffer
11505 /// collection per boot. Each buffer is uniquely identified by the
11506 /// `buffer_collection_id` and `buffer_index` together.
11507 GetBufferCollectionId { responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder },
11508 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
11509 /// created after this message to weak, which means that a client's `Node`
11510 /// client end (or a child created after this message) is not alone
11511 /// sufficient to keep allocated VMOs alive.
11512 ///
11513 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
11514 /// `close_weak_asap`.
11515 ///
11516 /// This message is only permitted before the `Node` becomes ready for
11517 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
11518 /// * `BufferCollectionToken`: any time
11519 /// * `BufferCollection`: before `SetConstraints`
11520 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
11521 ///
11522 /// Currently, no conversion from strong `Node` to weak `Node` after ready
11523 /// for allocation is provided, but a client can simulate that by creating
11524 /// an additional `Node` before allocation and setting that additional
11525 /// `Node` to weak, and then potentially at some point later sending
11526 /// `Release` and closing the client end of the client's strong `Node`, but
11527 /// keeping the client's weak `Node`.
11528 ///
11529 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
11530 /// collection failure (all `Node` client end(s) will see
11531 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
11532 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
11533 /// this situation until all `Node`(s) are ready for allocation. For initial
11534 /// allocation to succeed, at least one strong `Node` is required to exist
11535 /// at allocation time, but after that client receives VMO handles, that
11536 /// client can `BufferCollection.Release` and close the client end without
11537 /// causing this type of failure.
11538 ///
11539 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
11540 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
11541 /// separately as appropriate.
11542 SetWeak { control_handle: BufferCollectionTokenGroupControlHandle },
11543 /// This indicates to sysmem that the client is prepared to pay attention to
11544 /// `close_weak_asap`.
11545 ///
11546 /// If sent, this message must be before
11547 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
11548 ///
11549 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
11550 /// send this message before `WaitForAllBuffersAllocated`, or a parent
11551 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
11552 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
11553 /// trigger buffer collection failure.
11554 ///
11555 /// This message is necessary because weak sysmem VMOs have not always been
11556 /// a thing, so older clients are not aware of the need to pay attention to
11557 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
11558 /// sysmem weak VMO handles asap. By having this message and requiring
11559 /// participants to indicate their acceptance of this aspect of the overall
11560 /// protocol, we avoid situations where an older client is delivered a weak
11561 /// VMO without any way for sysmem to get that VMO to close quickly later
11562 /// (and on a per-buffer basis).
11563 ///
11564 /// A participant that doesn't handle `close_weak_asap` and also doesn't
11565 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
11566 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
11567 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
11568 /// same participant has a child/delegate which does retrieve VMOs, that
11569 /// child/delegate will need to send `SetWeakOk` before
11570 /// `WaitForAllBuffersAllocated`.
11571 ///
11572 /// + request `for_child_nodes_also` If present and true, this means direct
11573 /// child nodes of this node created after this message plus all
11574 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
11575 /// those nodes. Any child node of this node that was created before this
11576 /// message is not included. This setting is "sticky" in the sense that a
11577 /// subsequent `SetWeakOk` without this bool set to true does not reset
11578 /// the server-side bool. If this creates a problem for a participant, a
11579 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
11580 /// tokens instead, as appropriate. A participant should only set
11581 /// `for_child_nodes_also` true if the participant can really promise to
11582 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
11583 /// weak VMO handles held by participants holding the corresponding child
11584 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
11585 /// which are using sysmem(1) can be weak, despite the clients of those
11586 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
11587 /// direct way to find out about `close_weak_asap`. This only applies to
11588 /// descendents of this `Node` which are using sysmem(1), not to this
11589 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
11590 /// token, which will fail allocation unless an ancestor of this `Node`
11591 /// specified `for_child_nodes_also` true.
11592 SetWeakOk {
11593 payload: NodeSetWeakOkRequest,
11594 control_handle: BufferCollectionTokenGroupControlHandle,
11595 },
11596 /// The server_end will be closed after this `Node` and any child nodes have
11597 /// have released their buffer counts, making those counts available for
11598 /// reservation by a different `Node` via
11599 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
11600 ///
11601 /// The `Node` buffer counts may not be released until the entire tree of
11602 /// `Node`(s) is closed or failed, because
11603 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
11604 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
11605 /// `Node` buffer counts remain reserved until the orphaned node is later
11606 /// cleaned up.
11607 ///
11608 /// If the `Node` exceeds a fairly large number of attached eventpair server
11609 /// ends, a log message will indicate this and the `Node` (and the
11610 /// appropriate) sub-tree will fail.
11611 ///
11612 /// The `server_end` will remain open when
11613 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
11614 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
11615 /// [`fuchsia.sysmem2/BufferCollection`].
11616 ///
11617 /// This message can also be used with a
11618 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
11619 AttachNodeTracking {
11620 payload: NodeAttachNodeTrackingRequest,
11621 control_handle: BufferCollectionTokenGroupControlHandle,
11622 },
11623 /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
11624 /// (including its children) will be selected during allocation (or logical
11625 /// allocation).
11626 ///
11627 /// Before passing the client end of this token to
11628 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
11629 /// [`fuchsia.sysmem2/Node.Sync`] after
11630 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
11631 /// Or the client can use
11632 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
11633 /// essentially includes the `Sync`.
11634 ///
11635 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11636 /// fail the group's subtree and close the connection.
11637 ///
11638 /// After all children have been created, send AllChildrenPresent.
11639 ///
11640 /// + request `token_request` The server end of the new token channel.
11641 /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
11642 /// token allows the holder to get the same rights to buffers as the
11643 /// parent token (of the group) had. When the value isn't
11644 /// ZX_RIGHT_SAME_RIGHTS, the value is interpretted as a bitmask with 0
11645 /// bits ensuring those rights are attentuated, so 0xFFFFFFFF is a synonym
11646 /// for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
11647 /// causes subtree failure.
11648 CreateChild {
11649 payload: BufferCollectionTokenGroupCreateChildRequest,
11650 control_handle: BufferCollectionTokenGroupControlHandle,
11651 },
11652 /// Create 1 or more child tokens at once, synchronously. In contrast to
11653 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
11654 /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
11655 /// of a returned token to
11656 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
11657 ///
11658 /// The lower-index child tokens are higher priority (attempted sooner) than
11659 /// higher-index child tokens.
11660 ///
11661 /// As per all child tokens, successful aggregation will choose exactly one
11662 /// child among all created children (across all children created across
11663 /// potentially multiple calls to
11664 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
11665 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
11666 ///
11667 /// The maximum permissible total number of children per group, and total
11668 /// number of nodes in an overall tree (from the root) are capped to limits
11669 /// which are not configurable via these protocols.
11670 ///
11671 /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
11672 /// this will fail the group's subtree and close the connection.
11673 ///
11674 /// After all children have been created, send AllChildrenPresent.
11675 ///
11676 /// + request `rights_attentuation_masks` The size of the
11677 /// `rights_attentuation_masks` determines the number of created child
11678 /// tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
11679 /// The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
11680 /// other value, each 0 bit in the mask attenuates that right.
11681 /// - response `tokens` The created child tokens.
11682 CreateChildrenSync {
11683 payload: BufferCollectionTokenGroupCreateChildrenSyncRequest,
11684 responder: BufferCollectionTokenGroupCreateChildrenSyncResponder,
11685 },
11686 /// Indicate that no more children will be created.
11687 ///
11688 /// After creating all children, the client should send
11689 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
11690 /// inform sysmem that no more children will be created, so that sysmem can
11691 /// know when it's ok to start aggregating constraints.
11692 ///
11693 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11694 /// fail the group's subtree and close the connection.
11695 ///
11696 /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
11697 /// after `AllChildrenPresent`, else failure of the group's subtree will be
11698 /// triggered. This is intentionally not analogous to how `Release` without
11699 /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
11700 /// subtree failure.
11701 AllChildrenPresent { control_handle: BufferCollectionTokenGroupControlHandle },
11702 /// An interaction was received which does not match any known method.
11703 #[non_exhaustive]
11704 _UnknownMethod {
11705 /// Ordinal of the method that was called.
11706 ordinal: u64,
11707 control_handle: BufferCollectionTokenGroupControlHandle,
11708 method_type: fidl::MethodType,
11709 },
11710}
11711
11712impl BufferCollectionTokenGroupRequest {
11713 #[allow(irrefutable_let_patterns)]
11714 pub fn into_sync(self) -> Option<(BufferCollectionTokenGroupSyncResponder)> {
11715 if let BufferCollectionTokenGroupRequest::Sync { responder } = self {
11716 Some((responder))
11717 } else {
11718 None
11719 }
11720 }
11721
11722 #[allow(irrefutable_let_patterns)]
11723 pub fn into_release(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11724 if let BufferCollectionTokenGroupRequest::Release { control_handle } = self {
11725 Some((control_handle))
11726 } else {
11727 None
11728 }
11729 }
11730
11731 #[allow(irrefutable_let_patterns)]
11732 pub fn into_set_name(
11733 self,
11734 ) -> Option<(NodeSetNameRequest, BufferCollectionTokenGroupControlHandle)> {
11735 if let BufferCollectionTokenGroupRequest::SetName { payload, control_handle } = self {
11736 Some((payload, control_handle))
11737 } else {
11738 None
11739 }
11740 }
11741
11742 #[allow(irrefutable_let_patterns)]
11743 pub fn into_set_debug_client_info(
11744 self,
11745 ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenGroupControlHandle)> {
11746 if let BufferCollectionTokenGroupRequest::SetDebugClientInfo { payload, control_handle } =
11747 self
11748 {
11749 Some((payload, control_handle))
11750 } else {
11751 None
11752 }
11753 }
11754
11755 #[allow(irrefutable_let_patterns)]
11756 pub fn into_set_debug_timeout_log_deadline(
11757 self,
11758 ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenGroupControlHandle)>
11759 {
11760 if let BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {
11761 payload,
11762 control_handle,
11763 } = self
11764 {
11765 Some((payload, control_handle))
11766 } else {
11767 None
11768 }
11769 }
11770
11771 #[allow(irrefutable_let_patterns)]
11772 pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11773 if let BufferCollectionTokenGroupRequest::SetVerboseLogging { control_handle } = self {
11774 Some((control_handle))
11775 } else {
11776 None
11777 }
11778 }
11779
11780 #[allow(irrefutable_let_patterns)]
11781 pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGroupGetNodeRefResponder)> {
11782 if let BufferCollectionTokenGroupRequest::GetNodeRef { responder } = self {
11783 Some((responder))
11784 } else {
11785 None
11786 }
11787 }
11788
11789 #[allow(irrefutable_let_patterns)]
11790 pub fn into_is_alternate_for(
11791 self,
11792 ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenGroupIsAlternateForResponder)>
11793 {
11794 if let BufferCollectionTokenGroupRequest::IsAlternateFor { payload, responder } = self {
11795 Some((payload, responder))
11796 } else {
11797 None
11798 }
11799 }
11800
11801 #[allow(irrefutable_let_patterns)]
11802 pub fn into_get_buffer_collection_id(
11803 self,
11804 ) -> Option<(BufferCollectionTokenGroupGetBufferCollectionIdResponder)> {
11805 if let BufferCollectionTokenGroupRequest::GetBufferCollectionId { responder } = self {
11806 Some((responder))
11807 } else {
11808 None
11809 }
11810 }
11811
11812 #[allow(irrefutable_let_patterns)]
11813 pub fn into_set_weak(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11814 if let BufferCollectionTokenGroupRequest::SetWeak { control_handle } = self {
11815 Some((control_handle))
11816 } else {
11817 None
11818 }
11819 }
11820
11821 #[allow(irrefutable_let_patterns)]
11822 pub fn into_set_weak_ok(
11823 self,
11824 ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenGroupControlHandle)> {
11825 if let BufferCollectionTokenGroupRequest::SetWeakOk { payload, control_handle } = self {
11826 Some((payload, control_handle))
11827 } else {
11828 None
11829 }
11830 }
11831
11832 #[allow(irrefutable_let_patterns)]
11833 pub fn into_attach_node_tracking(
11834 self,
11835 ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenGroupControlHandle)> {
11836 if let BufferCollectionTokenGroupRequest::AttachNodeTracking { payload, control_handle } =
11837 self
11838 {
11839 Some((payload, control_handle))
11840 } else {
11841 None
11842 }
11843 }
11844
11845 #[allow(irrefutable_let_patterns)]
11846 pub fn into_create_child(
11847 self,
11848 ) -> Option<(
11849 BufferCollectionTokenGroupCreateChildRequest,
11850 BufferCollectionTokenGroupControlHandle,
11851 )> {
11852 if let BufferCollectionTokenGroupRequest::CreateChild { payload, control_handle } = self {
11853 Some((payload, control_handle))
11854 } else {
11855 None
11856 }
11857 }
11858
11859 #[allow(irrefutable_let_patterns)]
11860 pub fn into_create_children_sync(
11861 self,
11862 ) -> Option<(
11863 BufferCollectionTokenGroupCreateChildrenSyncRequest,
11864 BufferCollectionTokenGroupCreateChildrenSyncResponder,
11865 )> {
11866 if let BufferCollectionTokenGroupRequest::CreateChildrenSync { payload, responder } = self {
11867 Some((payload, responder))
11868 } else {
11869 None
11870 }
11871 }
11872
11873 #[allow(irrefutable_let_patterns)]
11874 pub fn into_all_children_present(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11875 if let BufferCollectionTokenGroupRequest::AllChildrenPresent { control_handle } = self {
11876 Some((control_handle))
11877 } else {
11878 None
11879 }
11880 }
11881
11882 /// Name of the method defined in FIDL
11883 pub fn method_name(&self) -> &'static str {
11884 match *self {
11885 BufferCollectionTokenGroupRequest::Sync { .. } => "sync",
11886 BufferCollectionTokenGroupRequest::Release { .. } => "release",
11887 BufferCollectionTokenGroupRequest::SetName { .. } => "set_name",
11888 BufferCollectionTokenGroupRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
11889 BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline { .. } => {
11890 "set_debug_timeout_log_deadline"
11891 }
11892 BufferCollectionTokenGroupRequest::SetVerboseLogging { .. } => "set_verbose_logging",
11893 BufferCollectionTokenGroupRequest::GetNodeRef { .. } => "get_node_ref",
11894 BufferCollectionTokenGroupRequest::IsAlternateFor { .. } => "is_alternate_for",
11895 BufferCollectionTokenGroupRequest::GetBufferCollectionId { .. } => {
11896 "get_buffer_collection_id"
11897 }
11898 BufferCollectionTokenGroupRequest::SetWeak { .. } => "set_weak",
11899 BufferCollectionTokenGroupRequest::SetWeakOk { .. } => "set_weak_ok",
11900 BufferCollectionTokenGroupRequest::AttachNodeTracking { .. } => "attach_node_tracking",
11901 BufferCollectionTokenGroupRequest::CreateChild { .. } => "create_child",
11902 BufferCollectionTokenGroupRequest::CreateChildrenSync { .. } => "create_children_sync",
11903 BufferCollectionTokenGroupRequest::AllChildrenPresent { .. } => "all_children_present",
11904 BufferCollectionTokenGroupRequest::_UnknownMethod {
11905 method_type: fidl::MethodType::OneWay,
11906 ..
11907 } => "unknown one-way method",
11908 BufferCollectionTokenGroupRequest::_UnknownMethod {
11909 method_type: fidl::MethodType::TwoWay,
11910 ..
11911 } => "unknown two-way method",
11912 }
11913 }
11914}
11915
/// Server-side control handle for the `BufferCollectionTokenGroup` protocol.
/// Cloning is cheap (it clones the inner `Arc`); all clones refer to the same
/// underlying channel, so shutting down via any clone affects them all.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenGroupControlHandle {
    // Shared serving state for the channel, parameterized on the default
    // Fuchsia resource dialect used throughout this generated file.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
11920
/// All methods delegate to the shared `ServeInner` state / its channel; this
/// impl only adapts the generic FIDL `ControlHandle` trait to this protocol.
impl fidl::endpoints::ControlHandle for BufferCollectionTokenGroupControlHandle {
    // Shut down the serving loop and close the channel.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    // Shut down, sending `status` as an epitaph to the client first.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    // True once the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    // Returns a future-like signal handle that resolves when the channel closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Raise/clear signals visible to the peer endpoint. Only meaningful on
    // Fuchsia, where zircon channel peer-signaling exists.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
11947
// Intentionally empty: this control handle exposes no inherent methods beyond
// the `fidl::endpoints::ControlHandle` trait implementation above.
impl BufferCollectionTokenGroupControlHandle {}
11949
/// Responder for the `BufferCollectionTokenGroup.Sync` transaction; used to
/// send the reply for the request identified by `tx_id`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupSyncResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the handle
    // while suppressing this type's `Drop` impl (which shuts down the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    // Transaction id echoed back in the response message (see `send_raw`).
    tx_id: u32,
}
11956
/// Sets the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupSyncResponder {
    fn drop(&mut self) {
        // Shut down first so a client waiting on this reply observes closure.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
11967
impl fidl::endpoints::Responder for BufferCollectionTokenGroupSyncResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    // Consumes the responder without triggering the channel shutdown that the
    // `Drop` impl would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
11982
11983impl BufferCollectionTokenGroupSyncResponder {
11984 /// Sends a response to the FIDL transaction.
11985 ///
11986 /// Sets the channel to shutdown if an error occurs.
11987 pub fn send(self) -> Result<(), fidl::Error> {
11988 let _result = self.send_raw();
11989 if _result.is_err() {
11990 self.control_handle.shutdown();
11991 }
11992 self.drop_without_shutdown();
11993 _result
11994 }
11995
11996 /// Similar to "send" but does not shutdown the channel if an error occurs.
11997 pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
11998 let _result = self.send_raw();
11999 self.drop_without_shutdown();
12000 _result
12001 }
12002
12003 fn send_raw(&self) -> Result<(), fidl::Error> {
12004 self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
12005 fidl::encoding::Flexible::new(()),
12006 self.tx_id,
12007 0x11ac2555cf575b54,
12008 fidl::encoding::DynamicFlags::FLEXIBLE,
12009 )
12010 }
12011}
12012
/// Responder for the `BufferCollectionTokenGroup.GetNodeRef` transaction; used
/// to send the reply for the request identified by `tx_id`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupGetNodeRefResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the handle
    // while suppressing this type's `Drop` impl (which shuts down the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    // Transaction id echoed back in the response message (see `send_raw`).
    tx_id: u32,
}
12019
/// Sets the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupGetNodeRefResponder {
    fn drop(&mut self) {
        // Shut down first so a client waiting on this reply observes closure.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12030
impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetNodeRefResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    // Consumes the responder without triggering the channel shutdown that the
    // `Drop` impl would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12045
12046impl BufferCollectionTokenGroupGetNodeRefResponder {
12047 /// Sends a response to the FIDL transaction.
12048 ///
12049 /// Sets the channel to shutdown if an error occurs.
12050 pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
12051 let _result = self.send_raw(payload);
12052 if _result.is_err() {
12053 self.control_handle.shutdown();
12054 }
12055 self.drop_without_shutdown();
12056 _result
12057 }
12058
12059 /// Similar to "send" but does not shutdown the channel if an error occurs.
12060 pub fn send_no_shutdown_on_err(
12061 self,
12062 mut payload: NodeGetNodeRefResponse,
12063 ) -> Result<(), fidl::Error> {
12064 let _result = self.send_raw(payload);
12065 self.drop_without_shutdown();
12066 _result
12067 }
12068
12069 fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
12070 self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
12071 fidl::encoding::Flexible::new(&mut payload),
12072 self.tx_id,
12073 0x5b3d0e51614df053,
12074 fidl::encoding::DynamicFlags::FLEXIBLE,
12075 )
12076 }
12077}
12078
/// Responder for the `BufferCollectionTokenGroup.IsAlternateFor` transaction;
/// used to send the reply for the request identified by `tx_id`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupIsAlternateForResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the handle
    // while suppressing this type's `Drop` impl (which shuts down the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    // Transaction id echoed back in the response message (see `send_raw`).
    tx_id: u32,
}
12085
/// Sets the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupIsAlternateForResponder {
    fn drop(&mut self) {
        // Shut down first so a client waiting on this reply observes closure.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12096
impl fidl::endpoints::Responder for BufferCollectionTokenGroupIsAlternateForResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    // Consumes the responder without triggering the channel shutdown that the
    // `Drop` impl would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12111
12112impl BufferCollectionTokenGroupIsAlternateForResponder {
12113 /// Sends a response to the FIDL transaction.
12114 ///
12115 /// Sets the channel to shutdown if an error occurs.
12116 pub fn send(
12117 self,
12118 mut result: Result<&NodeIsAlternateForResponse, Error>,
12119 ) -> Result<(), fidl::Error> {
12120 let _result = self.send_raw(result);
12121 if _result.is_err() {
12122 self.control_handle.shutdown();
12123 }
12124 self.drop_without_shutdown();
12125 _result
12126 }
12127
12128 /// Similar to "send" but does not shutdown the channel if an error occurs.
12129 pub fn send_no_shutdown_on_err(
12130 self,
12131 mut result: Result<&NodeIsAlternateForResponse, Error>,
12132 ) -> Result<(), fidl::Error> {
12133 let _result = self.send_raw(result);
12134 self.drop_without_shutdown();
12135 _result
12136 }
12137
12138 fn send_raw(
12139 &self,
12140 mut result: Result<&NodeIsAlternateForResponse, Error>,
12141 ) -> Result<(), fidl::Error> {
12142 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
12143 NodeIsAlternateForResponse,
12144 Error,
12145 >>(
12146 fidl::encoding::FlexibleResult::new(result),
12147 self.tx_id,
12148 0x3a58e00157e0825,
12149 fidl::encoding::DynamicFlags::FLEXIBLE,
12150 )
12151 }
12152}
12153
/// Responder for the `BufferCollectionTokenGroup.GetBufferCollectionId`
/// transaction; used to send the reply for the request identified by `tx_id`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the handle
    // while suppressing this type's `Drop` impl (which shuts down the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    // Transaction id echoed back in the response message (see `send_raw`).
    tx_id: u32,
}
12160
/// Sets the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        // Shut down first so a client waiting on this reply observes closure.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12171
impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    // Consumes the responder without triggering the channel shutdown that the
    // `Drop` impl would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12186
12187impl BufferCollectionTokenGroupGetBufferCollectionIdResponder {
12188 /// Sends a response to the FIDL transaction.
12189 ///
12190 /// Sets the channel to shutdown if an error occurs.
12191 pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
12192 let _result = self.send_raw(payload);
12193 if _result.is_err() {
12194 self.control_handle.shutdown();
12195 }
12196 self.drop_without_shutdown();
12197 _result
12198 }
12199
12200 /// Similar to "send" but does not shutdown the channel if an error occurs.
12201 pub fn send_no_shutdown_on_err(
12202 self,
12203 mut payload: &NodeGetBufferCollectionIdResponse,
12204 ) -> Result<(), fidl::Error> {
12205 let _result = self.send_raw(payload);
12206 self.drop_without_shutdown();
12207 _result
12208 }
12209
12210 fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
12211 self.control_handle
12212 .inner
12213 .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
12214 fidl::encoding::Flexible::new(payload),
12215 self.tx_id,
12216 0x77d19a494b78ba8c,
12217 fidl::encoding::DynamicFlags::FLEXIBLE,
12218 )
12219 }
12220}
12221
/// Responder for the `BufferCollectionTokenGroup.CreateChildrenSync`
/// transaction; used to send the reply for the request identified by `tx_id`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupCreateChildrenSyncResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the handle
    // while suppressing this type's `Drop` impl (which shuts down the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    // Transaction id echoed back in the response message (see `send_raw`).
    tx_id: u32,
}
12228
/// Sets the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupCreateChildrenSyncResponder {
    fn drop(&mut self) {
        // Shut down first so a client waiting on this reply observes closure.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12239
impl fidl::endpoints::Responder for BufferCollectionTokenGroupCreateChildrenSyncResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    // Consumes the responder without triggering the channel shutdown that the
    // `Drop` impl would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12254
12255impl BufferCollectionTokenGroupCreateChildrenSyncResponder {
12256 /// Sends a response to the FIDL transaction.
12257 ///
12258 /// Sets the channel to shutdown if an error occurs.
12259 pub fn send(
12260 self,
12261 mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
12262 ) -> Result<(), fidl::Error> {
12263 let _result = self.send_raw(payload);
12264 if _result.is_err() {
12265 self.control_handle.shutdown();
12266 }
12267 self.drop_without_shutdown();
12268 _result
12269 }
12270
12271 /// Similar to "send" but does not shutdown the channel if an error occurs.
12272 pub fn send_no_shutdown_on_err(
12273 self,
12274 mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
12275 ) -> Result<(), fidl::Error> {
12276 let _result = self.send_raw(payload);
12277 self.drop_without_shutdown();
12278 _result
12279 }
12280
12281 fn send_raw(
12282 &self,
12283 mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
12284 ) -> Result<(), fidl::Error> {
12285 self.control_handle.inner.send::<fidl::encoding::FlexibleType<
12286 BufferCollectionTokenGroupCreateChildrenSyncResponse,
12287 >>(
12288 fidl::encoding::Flexible::new(&mut payload),
12289 self.tx_id,
12290 0x15dea448c536070a,
12291 fidl::encoding::DynamicFlags::FLEXIBLE,
12292 )
12293 }
12294}
12295
/// Zero-sized marker type identifying the `Node` protocol.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct NodeMarker;

impl fidl::endpoints::ProtocolMarker for NodeMarker {
    type Proxy = NodeProxy;
    type RequestStream = NodeRequestStream;
    // The synchronous proxy only exists on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = NodeSynchronousProxy;

    // NOTE(review): "(anonymous)" suggests the protocol is not discoverable by
    // name — confirm against the FIDL definition.
    const DEBUG_NAME: &'static str = "(anonymous) Node";
}
/// Result type returned by `Node.IsAlternateFor`.
pub type NodeIsAlternateForResult = Result<NodeIsAlternateForResponse, Error>;
12308
/// Client-side interface for the `Node` protocol, implemented by the generated
/// proxy types. One-way methods return `Result<(), fidl::Error>` directly;
/// two-way methods return a typed response future.
pub trait NodeProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client for the `Node` protocol.
/// Only compiled for Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct NodeSynchronousProxy {
    // Underlying synchronous FIDL client wrapping the channel.
    client: fidl::client::sync::Client,
}
12347
// Conversions between the synchronous proxy and its underlying channel.
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for NodeSynchronousProxy {
    type Proxy = NodeProxy;
    type Protocol = NodeMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
12365
12366#[cfg(target_os = "fuchsia")]
12367impl NodeSynchronousProxy {
12368 pub fn new(channel: fidl::Channel) -> Self {
12369 Self { client: fidl::client::sync::Client::new(channel) }
12370 }
12371
12372 pub fn into_channel(self) -> fidl::Channel {
12373 self.client.into_channel()
12374 }
12375
12376 /// Waits until an event arrives and returns it. It is safe for other
12377 /// threads to make concurrent requests while waiting for an event.
12378 pub fn wait_for_event(&self, deadline: zx::MonotonicInstant) -> Result<NodeEvent, fidl::Error> {
12379 NodeEvent::decode(self.client.wait_for_event::<NodeMarker>(deadline)?)
12380 }
12381
12382 /// Ensure that previous messages have been received server side. This is
12383 /// particularly useful after previous messages that created new tokens,
12384 /// because a token must be known to the sysmem server before sending the
12385 /// token to another participant.
12386 ///
12387 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12388 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12389 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12390 /// to mitigate the possibility of a hostile/fake
12391 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12392 /// Another way is to pass the token to
12393 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
12394 /// the token as part of exchanging it for a
12395 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12396 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12397 /// of stalling.
12398 ///
12399 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12400 /// and then starting and completing a `Sync`, it's then safe to send the
12401 /// `BufferCollectionToken` client ends to other participants knowing the
12402 /// server will recognize the tokens when they're sent by the other
12403 /// participants to sysmem in a
12404 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
12405 /// efficient way to create tokens while avoiding unnecessary round trips.
12406 ///
12407 /// Other options include waiting for each
12408 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
12409 /// individually (using separate call to `Sync` after each), or calling
12410 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
12411 /// converted to a `BufferCollection` via
12412 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
12413 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
12414 /// the sync step and can create multiple tokens at once.
12415 pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
12416 let _response = self.client.send_query::<
12417 fidl::encoding::EmptyPayload,
12418 fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
12419 NodeMarker,
12420 >(
12421 (),
12422 0x11ac2555cf575b54,
12423 fidl::encoding::DynamicFlags::FLEXIBLE,
12424 ___deadline,
12425 )?
12426 .into_result::<NodeMarker>("sync")?;
12427 Ok(_response)
12428 }
12429
12430 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
12431 ///
12432 /// Normally a participant will convert a `BufferCollectionToken` into a
12433 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
12434 /// `Release` via the token (and then close the channel immediately or
12435 /// shortly later in response to server closing the server end), which
12436 /// avoids causing buffer collection failure. Without a prior `Release`,
12437 /// closing the `BufferCollectionToken` client end will cause buffer
12438 /// collection failure.
12439 ///
12440 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
12441 ///
12442 /// By default the server handles unexpected closure of a
12443 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
12444 /// first) by failing the buffer collection. Partly this is to expedite
12445 /// closing VMO handles to reclaim memory when any participant fails. If a
12446 /// participant would like to cleanly close a `BufferCollection` without
12447 /// causing buffer collection failure, the participant can send `Release`
12448 /// before closing the `BufferCollection` client end. The `Release` can
12449 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
12450 /// buffer collection won't require constraints from this node in order to
12451 /// allocate. If after `SetConstraints`, the constraints are retained and
12452 /// aggregated, despite the lack of `BufferCollection` connection at the
12453 /// time of constraints aggregation.
12454 ///
12455 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
12456 ///
12457 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
12458 /// end (without `Release` first) will trigger failure of the buffer
12459 /// collection. To close a `BufferCollectionTokenGroup` channel without
12460 /// failing the buffer collection, ensure that AllChildrenPresent() has been
12461 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
12462 /// client end.
12463 ///
12464 /// If `Release` occurs before
12465 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
12466 /// buffer collection will fail (triggered by reception of `Release` without
12467 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
12468 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
12469 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
12470 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
12471 /// close requires `AllChildrenPresent` (if not already sent), then
12472 /// `Release`, then close client end.
12473 ///
12474 /// If `Release` occurs after `AllChildrenPresent`, the children and all
12475 /// their constraints remain intact (just as they would if the
12476 /// `BufferCollectionTokenGroup` channel had remained open), and the client
12477 /// end close doesn't trigger buffer collection failure.
12478 ///
12479 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
12480 ///
12481 /// For brevity, the per-channel-protocol paragraphs above ignore the
12482 /// separate failure domain created by
12483 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
12484 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
12485 /// unexpectedly closes (without `Release` first) and that client end is
12486 /// under a failure domain, instead of failing the whole buffer collection,
12487 /// the failure domain is failed, but the buffer collection itself is
12488 /// isolated from failure of the failure domain. Such failure domains can be
12489 /// nested, in which case only the inner-most failure domain in which the
12490 /// `Node` resides fails.
12491 pub fn r#release(&self) -> Result<(), fidl::Error> {
12492 self.client.send::<fidl::encoding::EmptyPayload>(
12493 (),
12494 0x6a5cae7d6d6e04c6,
12495 fidl::encoding::DynamicFlags::FLEXIBLE,
12496 )
12497 }
12498
12499 /// Set a name for VMOs in this buffer collection.
12500 ///
12501 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
12502 /// will be truncated to fit. The name of the vmo will be suffixed with the
12503 /// buffer index within the collection (if the suffix fits within
12504 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
12505 /// listed in the inspect data.
12506 ///
12507 /// The name only affects VMOs allocated after the name is set; this call
12508 /// does not rename existing VMOs. If multiple clients set different names
12509 /// then the larger priority value will win. Setting a new name with the
12510 /// same priority as a prior name doesn't change the name.
12511 ///
12512 /// All table fields are currently required.
12513 ///
12514 /// + request `priority` The name is only set if this is the first `SetName`
12515 /// or if `priority` is greater than any previous `priority` value in
12516 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
12517 /// + request `name` The name for VMOs created under this buffer collection.
12518 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
12519 self.client.send::<NodeSetNameRequest>(
12520 payload,
12521 0xb41f1624f48c1e9,
12522 fidl::encoding::DynamicFlags::FLEXIBLE,
12523 )
12524 }
12525
12526 /// Set information about the current client that can be used by sysmem to
12527 /// help diagnose leaking memory and allocation stalls waiting for a
12528 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
12529 ///
12530 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
12531 /// `Node`(s) derived from this `Node`, unless overriden by
12532 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
12533 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
12534 ///
12535 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
12536 /// `Allocator` is the most efficient way to ensure that all
12537 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
12538 /// set, and is also more efficient than separately sending the same debug
12539 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
12540 /// created [`fuchsia.sysmem2/Node`].
12541 ///
12542 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
12543 /// indicate which client is closing their channel first, leading to subtree
12544 /// failure (which can be normal if the purpose of the subtree is over, but
12545 /// if happening earlier than expected, the client-channel-specific name can
12546 /// help diagnose where the failure is first coming from, from sysmem's
12547 /// point of view).
12548 ///
12549 /// All table fields are currently required.
12550 ///
12551 /// + request `name` This can be an arbitrary string, but the current
12552 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
12553 /// + request `id` This can be an arbitrary id, but the current process ID
12554 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
12555 pub fn r#set_debug_client_info(
12556 &self,
12557 mut payload: &NodeSetDebugClientInfoRequest,
12558 ) -> Result<(), fidl::Error> {
12559 self.client.send::<NodeSetDebugClientInfoRequest>(
12560 payload,
12561 0x5cde8914608d99b1,
12562 fidl::encoding::DynamicFlags::FLEXIBLE,
12563 )
12564 }
12565
12566 /// Sysmem logs a warning if sysmem hasn't seen
12567 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
12568 /// within 5 seconds after creation of a new collection.
12569 ///
12570 /// Clients can call this method to change when the log is printed. If
12571 /// multiple client set the deadline, it's unspecified which deadline will
12572 /// take effect.
12573 ///
12574 /// In most cases the default works well.
12575 ///
12576 /// All table fields are currently required.
12577 ///
12578 /// + request `deadline` The time at which sysmem will start trying to log
12579 /// the warning, unless all constraints are with sysmem by then.
12580 pub fn r#set_debug_timeout_log_deadline(
12581 &self,
12582 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
12583 ) -> Result<(), fidl::Error> {
12584 self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
12585 payload,
12586 0x716b0af13d5c0806,
12587 fidl::encoding::DynamicFlags::FLEXIBLE,
12588 )
12589 }
12590
12591 /// This enables verbose logging for the buffer collection.
12592 ///
12593 /// Verbose logging includes constraints set via
12594 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
12595 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
12596 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
12597 /// the tree of `Node`(s).
12598 ///
12599 /// Normally sysmem prints only a single line complaint when aggregation
12600 /// fails, with just the specific detailed reason that aggregation failed,
12601 /// with little surrounding context. While this is often enough to diagnose
12602 /// a problem if only a small change was made and everything was working
12603 /// before the small change, it's often not particularly helpful for getting
12604 /// a new buffer collection to work for the first time. Especially with
12605 /// more complex trees of nodes, involving things like
12606 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
12607 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
12608 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
12609 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
12610 /// looks like and why it's failing a logical allocation, or why a tree or
12611 /// subtree is failing sooner than expected.
12612 ///
12613 /// The intent of the extra logging is to be acceptable from a performance
12614 /// point of view, under the assumption that verbose logging is only enabled
12615 /// on a low number of buffer collections. If we're not tracking down a bug,
12616 /// we shouldn't send this message.
12617 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
12618 self.client.send::<fidl::encoding::EmptyPayload>(
12619 (),
12620 0x5209c77415b4dfad,
12621 fidl::encoding::DynamicFlags::FLEXIBLE,
12622 )
12623 }
12624
12625 /// This gets a handle that can be used as a parameter to
12626 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
12627 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
12628 /// client obtained this handle from this `Node`.
12629 ///
12630 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
12631 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
12632 /// despite the two calls typically being on different channels.
12633 ///
12634 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
12635 ///
12636 /// All table fields are currently required.
12637 ///
12638 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
12639 /// different `Node` channel, to prove that the client obtained the handle
12640 /// from this `Node`.
12641 pub fn r#get_node_ref(
12642 &self,
12643 ___deadline: zx::MonotonicInstant,
12644 ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
12645 let _response = self.client.send_query::<
12646 fidl::encoding::EmptyPayload,
12647 fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
12648 NodeMarker,
12649 >(
12650 (),
12651 0x5b3d0e51614df053,
12652 fidl::encoding::DynamicFlags::FLEXIBLE,
12653 ___deadline,
12654 )?
12655 .into_result::<NodeMarker>("get_node_ref")?;
12656 Ok(_response)
12657 }
12658
12659 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
12660 /// rooted at a different child token of a common parent
12661 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
12662 /// passed-in `node_ref`.
12663 ///
12664 /// This call is for assisting with admission control de-duplication, and
12665 /// with debugging.
12666 ///
12667 /// The `node_ref` must be obtained using
12668 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
12669 ///
12670 /// The `node_ref` can be a duplicated handle; it's not necessary to call
12671 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
12672 ///
12673 /// If a calling token may not actually be a valid token at all due to a
12674 /// potentially hostile/untrusted provider of the token, call
12675 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
12676 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
12677 /// never responds due to a calling token not being a real token (not really
12678 /// talking to sysmem). Another option is to call
12679 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
12680 /// which also validates the token along with converting it to a
12681 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
12682 ///
12683 /// All table fields are currently required.
12684 ///
12685 /// - response `is_alternate`
12686 /// - true: The first parent node in common between the calling node and
12687 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
12688 /// that the calling `Node` and the `node_ref` `Node` will not have both
12689 /// their constraints apply - rather sysmem will choose one or the other
12690 /// of the constraints - never both. This is because only one child of
12691 /// a `BufferCollectionTokenGroup` is selected during logical
12692 /// allocation, with only that one child's subtree contributing to
12693 /// constraints aggregation.
12694 /// - false: The first parent node in common between the calling `Node`
12695 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
12696 /// Currently, this means the first parent node in common is a
12697 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
12698 /// `Release`ed). This means that the calling `Node` and the `node_ref`
12699 /// `Node` may have both their constraints apply during constraints
12700 /// aggregation of the logical allocation, if both `Node`(s) are
12701 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
12702 /// this case, there is no `BufferCollectionTokenGroup` that will
12703 /// directly prevent the two `Node`(s) from both being selected and
12704 /// their constraints both aggregated, but even when false, one or both
12705 /// `Node`(s) may still be eliminated from consideration if one or both
12706 /// `Node`(s) has a direct or indirect parent
12707 /// `BufferCollectionTokenGroup` which selects a child subtree other
12708 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
12709 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
12710 /// associated with the same buffer collection as the calling `Node`.
12711 /// Another reason for this error is if the `node_ref` is an
12712 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
12713 /// a real `node_ref` obtained from `GetNodeRef`.
12714 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
12716 /// the needed rights expected on a real `node_ref`.
12717 /// * No other failing status codes are returned by this call. However,
12718 /// sysmem may add additional codes in future, so the client should have
12719 /// sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
        // Two-way call with a domain error: the response envelope carries
        // either NodeIsAlternateForResponse or the sysmem Error enum.
        // `payload` is taken by value and encoded via `&mut` — NOTE(review):
        // by-value suggests the request table carries handles; confirm
        // against the NodeIsAlternateForRequest definition.
        let _response = self.client.send_query::<
            NodeIsAlternateForRequest,
            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
            NodeMarker,
        >(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<NodeMarker>("is_alternate_for")?;
        // `.map(|x| x)` is an identity map emitted by the generator; it has
        // no runtime effect.
        Ok(_response.map(|x| x))
    }
12738
12739 /// Get the buffer collection ID. This ID is also available from
12740 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
12741 /// within the collection).
12742 ///
12743 /// This call is mainly useful in situations where we can't convey a
12744 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
12745 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
12746 /// handle, which can be joined back up with a `BufferCollection` client end
12747 /// that was created via a different path. Prefer to convey a
12748 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
12749 ///
12750 /// Trusting a `buffer_collection_id` value from a source other than sysmem
12751 /// is analogous to trusting a koid value from a source other than zircon.
12752 /// Both should be avoided unless really necessary, and both require
12753 /// caution. In some situations it may be reasonable to refer to a
12754 /// pre-established `BufferCollection` by `buffer_collection_id` via a
12755 /// protocol for efficiency reasons, but an incoming value purporting to be
12756 /// a `buffer_collection_id` is not sufficient alone to justify granting the
12757 /// sender of the `buffer_collection_id` any capability. The sender must
12758 /// first prove to a receiver that the sender has/had a VMO or has/had a
12759 /// `BufferCollectionToken` to the same collection by sending a handle that
12760 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
12761 /// `buffer_collection_id` value. The receiver should take care to avoid
12762 /// assuming that a sender had a `BufferCollectionToken` in cases where the
12763 /// sender has only proven that the sender had a VMO.
12764 ///
12765 /// - response `buffer_collection_id` This ID is unique per buffer
12766 /// collection per boot. Each buffer is uniquely identified by the
12767 /// `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
        // Two-way call: empty request payload, response wrapped in a flexible
        // envelope. Blocks waiting for the reply until `___deadline`.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
            NodeMarker,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        // Resolve the flexible envelope; unknown-interaction replies surface
        // as a fidl::Error rather than a payload.
        .into_result::<NodeMarker>("get_buffer_collection_id")?;
        Ok(_response)
    }
12785
12786 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
12787 /// created after this message to weak, which means that a client's `Node`
12788 /// client end (or a child created after this message) is not alone
12789 /// sufficient to keep allocated VMOs alive.
12790 ///
12791 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
12792 /// `close_weak_asap`.
12793 ///
12794 /// This message is only permitted before the `Node` becomes ready for
12795 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
12796 /// * `BufferCollectionToken`: any time
12797 /// * `BufferCollection`: before `SetConstraints`
12798 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
12799 ///
12800 /// Currently, no conversion from strong `Node` to weak `Node` after ready
12801 /// for allocation is provided, but a client can simulate that by creating
12802 /// an additional `Node` before allocation and setting that additional
12803 /// `Node` to weak, and then potentially at some point later sending
12804 /// `Release` and closing the client end of the client's strong `Node`, but
12805 /// keeping the client's weak `Node`.
12806 ///
12807 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
12808 /// collection failure (all `Node` client end(s) will see
12809 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
12810 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
12811 /// this situation until all `Node`(s) are ready for allocation. For initial
12812 /// allocation to succeed, at least one strong `Node` is required to exist
12813 /// at allocation time, but after that client receives VMO handles, that
12814 /// client can `BufferCollection.Release` and close the client end without
12815 /// causing this type of failure.
12816 ///
12817 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
12818 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
12819 /// separately as appropriate.
    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) message with an empty payload; no reply
        // is read. Errors reported here are local send/encode failures only.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12827
12828 /// This indicates to sysmem that the client is prepared to pay attention to
12829 /// `close_weak_asap`.
12830 ///
12831 /// If sent, this message must be before
12832 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
12833 ///
12834 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
12835 /// send this message before `WaitForAllBuffersAllocated`, or a parent
12836 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
12837 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
12838 /// trigger buffer collection failure.
12839 ///
12840 /// This message is necessary because weak sysmem VMOs have not always been
12841 /// a thing, so older clients are not aware of the need to pay attention to
12842 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
12843 /// sysmem weak VMO handles asap. By having this message and requiring
12844 /// participants to indicate their acceptance of this aspect of the overall
12845 /// protocol, we avoid situations where an older client is delivered a weak
12846 /// VMO without any way for sysmem to get that VMO to close quickly later
12847 /// (and on a per-buffer basis).
12848 ///
12849 /// A participant that doesn't handle `close_weak_asap` and also doesn't
12850 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
12851 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
12852 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
12853 /// same participant has a child/delegate which does retrieve VMOs, that
12854 /// child/delegate will need to send `SetWeakOk` before
12855 /// `WaitForAllBuffersAllocated`.
12856 ///
12857 /// + request `for_child_nodes_also` If present and true, this means direct
12858 /// child nodes of this node created after this message plus all
12859 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
12860 /// those nodes. Any child node of this node that was created before this
12861 /// message is not included. This setting is "sticky" in the sense that a
12862 /// subsequent `SetWeakOk` without this bool set to true does not reset
12863 /// the server-side bool. If this creates a problem for a participant, a
12864 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
12865 /// tokens instead, as appropriate. A participant should only set
12866 /// `for_child_nodes_also` true if the participant can really promise to
12867 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
12868 /// weak VMO handles held by participants holding the corresponding child
12869 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
12870 /// which are using sysmem(1) can be weak, despite the clients of those
12871 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
12872 /// direct way to find out about `close_weak_asap`. This only applies to
12873 /// descendents of this `Node` which are using sysmem(1), not to this
12874 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
12875 /// token, which will fail allocation unless an ancestor of this `Node`
12876 /// specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // One-way message; the request table is moved in and encoded via
        // `&mut`. Errors reported here are local send/encode failures only.
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12884
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
12887 /// reservation by a different `Node` via
12888 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
12889 ///
12890 /// The `Node` buffer counts may not be released until the entire tree of
12891 /// `Node`(s) is closed or failed, because
12892 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
12893 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
12894 /// `Node` buffer counts remain reserved until the orphaned node is later
12895 /// cleaned up.
12896 ///
12897 /// If the `Node` exceeds a fairly large number of attached eventpair server
12898 /// ends, a log message will indicate this and the `Node` (and the
12899 /// appropriate) sub-tree will fail.
12900 ///
12901 /// The `server_end` will remain open when
12902 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
12903 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
12904 /// [`fuchsia.sysmem2/BufferCollection`].
12905 ///
12906 /// This message can also be used with a
12907 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; the request table is moved in — NOTE(review):
        // by-value suggests it carries the eventpair server end described in
        // the doc comment above, consumed during encoding; confirm against
        // the NodeAttachNodeTrackingRequest definition.
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12918}
12919
#[cfg(target_os = "fuchsia")]
impl From<NodeSynchronousProxy> for zx::NullableHandle {
    /// Consumes the proxy and yields its underlying channel as a handle.
    fn from(value: NodeSynchronousProxy) -> Self {
        let channel = value.into_channel();
        Self::from(channel)
    }
}
12926
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for NodeSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy for this protocol.
    fn from(value: fidl::Channel) -> Self {
        NodeSynchronousProxy::new(value)
    }
}
12933
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for NodeSynchronousProxy {
    type Protocol = NodeMarker;

    /// Builds a synchronous proxy from a typed client endpoint.
    fn from_client(value: fidl::endpoints::ClientEnd<NodeMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
12942
/// Asynchronous client proxy for the `fuchsia.sysmem2/Node` protocol.
#[derive(Debug, Clone)]
pub struct NodeProxy {
    // Async FIDL client (default Fuchsia resource dialect) through which all
    // generated methods below send their messages.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
12947
12948impl fidl::endpoints::Proxy for NodeProxy {
12949 type Protocol = NodeMarker;
12950
12951 fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
12952 Self::new(inner)
12953 }
12954
12955 fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
12956 self.client.into_channel().map_err(|client| Self { client })
12957 }
12958
12959 fn as_channel(&self) -> &::fidl::AsyncChannel {
12960 self.client.as_channel()
12961 }
12962}
12963
12964impl NodeProxy {
12965 /// Create a new Proxy for fuchsia.sysmem2/Node.
12966 pub fn new(channel: ::fidl::AsyncChannel) -> Self {
12967 let protocol_name = <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
12968 Self { client: fidl::client::Client::new(channel, protocol_name) }
12969 }
12970
12971 /// Get a Stream of events from the remote end of the protocol.
12972 ///
12973 /// # Panics
12974 ///
12975 /// Panics if the event stream was already taken.
12976 pub fn take_event_stream(&self) -> NodeEventStream {
12977 NodeEventStream { event_receiver: self.client.take_event_receiver() }
12978 }
12979
12980 /// Ensure that previous messages have been received server side. This is
12981 /// particularly useful after previous messages that created new tokens,
12982 /// because a token must be known to the sysmem server before sending the
12983 /// token to another participant.
12984 ///
12985 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12986 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12987 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12988 /// to mitigate the possibility of a hostile/fake
12989 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12990 /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
12992 /// the token as part of exchanging it for a
12993 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12994 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12995 /// of stalling.
12996 ///
12997 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12998 /// and then starting and completing a `Sync`, it's then safe to send the
12999 /// `BufferCollectionToken` client ends to other participants knowing the
13000 /// server will recognize the tokens when they're sent by the other
13001 /// participants to sysmem in a
13002 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
13003 /// efficient way to create tokens while avoiding unnecessary round trips.
13004 ///
13005 /// Other options include waiting for each
13006 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
13007 /// individually (using separate call to `Sync` after each), or calling
13008 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
13009 /// converted to a `BufferCollection` via
13010 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
13011 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
13012 /// the sync step and can create multiple tokens at once.
13013 pub fn r#sync(
13014 &self,
13015 ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
13016 NodeProxyInterface::r#sync(self)
13017 }
13018
13019 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
13020 ///
13021 /// Normally a participant will convert a `BufferCollectionToken` into a
13022 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
13023 /// `Release` via the token (and then close the channel immediately or
13024 /// shortly later in response to server closing the server end), which
13025 /// avoids causing buffer collection failure. Without a prior `Release`,
13026 /// closing the `BufferCollectionToken` client end will cause buffer
13027 /// collection failure.
13028 ///
13029 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
13030 ///
13031 /// By default the server handles unexpected closure of a
13032 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
13033 /// first) by failing the buffer collection. Partly this is to expedite
13034 /// closing VMO handles to reclaim memory when any participant fails. If a
13035 /// participant would like to cleanly close a `BufferCollection` without
13036 /// causing buffer collection failure, the participant can send `Release`
13037 /// before closing the `BufferCollection` client end. The `Release` can
13038 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
13039 /// buffer collection won't require constraints from this node in order to
13040 /// allocate. If after `SetConstraints`, the constraints are retained and
13041 /// aggregated, despite the lack of `BufferCollection` connection at the
13042 /// time of constraints aggregation.
13043 ///
13044 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
13045 ///
13046 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
13047 /// end (without `Release` first) will trigger failure of the buffer
13048 /// collection. To close a `BufferCollectionTokenGroup` channel without
13049 /// failing the buffer collection, ensure that AllChildrenPresent() has been
13050 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
13051 /// client end.
13052 ///
13053 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
13055 /// buffer collection will fail (triggered by reception of `Release` without
13056 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
13057 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
13058 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
13059 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
13060 /// close requires `AllChildrenPresent` (if not already sent), then
13061 /// `Release`, then close client end.
13062 ///
13063 /// If `Release` occurs after `AllChildrenPresent`, the children and all
13064 /// their constraints remain intact (just as they would if the
13065 /// `BufferCollectionTokenGroup` channel had remained open), and the client
13066 /// end close doesn't trigger buffer collection failure.
13067 ///
13068 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
13069 ///
13070 /// For brevity, the per-channel-protocol paragraphs above ignore the
13071 /// separate failure domain created by
13072 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
13073 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
13074 /// unexpectedly closes (without `Release` first) and that client end is
13075 /// under a failure domain, instead of failing the whole buffer collection,
13076 /// the failure domain is failed, but the buffer collection itself is
13077 /// isolated from failure of the failure domain. Such failure domains can be
13078 /// nested, in which case only the inner-most failure domain in which the
13079 /// `Node` resides fails.
13080 pub fn r#release(&self) -> Result<(), fidl::Error> {
13081 NodeProxyInterface::r#release(self)
13082 }
13083
13084 /// Set a name for VMOs in this buffer collection.
13085 ///
13086 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
13087 /// will be truncated to fit. The name of the vmo will be suffixed with the
13088 /// buffer index within the collection (if the suffix fits within
13089 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
13090 /// listed in the inspect data.
13091 ///
13092 /// The name only affects VMOs allocated after the name is set; this call
13093 /// does not rename existing VMOs. If multiple clients set different names
13094 /// then the larger priority value will win. Setting a new name with the
13095 /// same priority as a prior name doesn't change the name.
13096 ///
13097 /// All table fields are currently required.
13098 ///
13099 /// + request `priority` The name is only set if this is the first `SetName`
13100 /// or if `priority` is greater than any previous `priority` value in
13101 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
13102 /// + request `name` The name for VMOs created under this buffer collection.
13103 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
13104 NodeProxyInterface::r#set_name(self, payload)
13105 }
13106
13107 /// Set information about the current client that can be used by sysmem to
13108 /// help diagnose leaking memory and allocation stalls waiting for a
13109 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
13110 ///
13111 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
13112 /// `Node`(s) derived from this `Node`, unless overriden by
13113 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
13114 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
13115 ///
13116 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
13117 /// `Allocator` is the most efficient way to ensure that all
13118 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
13119 /// set, and is also more efficient than separately sending the same debug
13120 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
13121 /// created [`fuchsia.sysmem2/Node`].
13122 ///
13123 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
13124 /// indicate which client is closing their channel first, leading to subtree
13125 /// failure (which can be normal if the purpose of the subtree is over, but
13126 /// if happening earlier than expected, the client-channel-specific name can
13127 /// help diagnose where the failure is first coming from, from sysmem's
13128 /// point of view).
13129 ///
13130 /// All table fields are currently required.
13131 ///
13132 /// + request `name` This can be an arbitrary string, but the current
13133 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
13134 /// + request `id` This can be an arbitrary id, but the current process ID
13135 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
13136 pub fn r#set_debug_client_info(
13137 &self,
13138 mut payload: &NodeSetDebugClientInfoRequest,
13139 ) -> Result<(), fidl::Error> {
13140 NodeProxyInterface::r#set_debug_client_info(self, payload)
13141 }
13142
13143 /// Sysmem logs a warning if sysmem hasn't seen
13144 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
13145 /// within 5 seconds after creation of a new collection.
13146 ///
13147 /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
13149 /// take effect.
13150 ///
13151 /// In most cases the default works well.
13152 ///
13153 /// All table fields are currently required.
13154 ///
13155 /// + request `deadline` The time at which sysmem will start trying to log
13156 /// the warning, unless all constraints are with sysmem by then.
13157 pub fn r#set_debug_timeout_log_deadline(
13158 &self,
13159 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
13160 ) -> Result<(), fidl::Error> {
13161 NodeProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
13162 }
13163
13164 /// This enables verbose logging for the buffer collection.
13165 ///
13166 /// Verbose logging includes constraints set via
13167 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
13168 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
13169 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
13170 /// the tree of `Node`(s).
13171 ///
13172 /// Normally sysmem prints only a single line complaint when aggregation
13173 /// fails, with just the specific detailed reason that aggregation failed,
13174 /// with little surrounding context. While this is often enough to diagnose
13175 /// a problem if only a small change was made and everything was working
13176 /// before the small change, it's often not particularly helpful for getting
13177 /// a new buffer collection to work for the first time. Especially with
13178 /// more complex trees of nodes, involving things like
13179 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
13180 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
13181 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
13182 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
13183 /// looks like and why it's failing a logical allocation, or why a tree or
13184 /// subtree is failing sooner than expected.
13185 ///
13186 /// The intent of the extra logging is to be acceptable from a performance
13187 /// point of view, under the assumption that verbose logging is only enabled
13188 /// on a low number of buffer collections. If we're not tracking down a bug,
13189 /// we shouldn't send this message.
13190 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
13191 NodeProxyInterface::r#set_verbose_logging(self)
13192 }
13193
13194 /// This gets a handle that can be used as a parameter to
13195 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
13196 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
13197 /// client obtained this handle from this `Node`.
13198 ///
13199 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
13200 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
13201 /// despite the two calls typically being on different channels.
13202 ///
13203 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
13204 ///
13205 /// All table fields are currently required.
13206 ///
13207 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
13208 /// different `Node` channel, to prove that the client obtained the handle
13209 /// from this `Node`.
13210 pub fn r#get_node_ref(
13211 &self,
13212 ) -> fidl::client::QueryResponseFut<
13213 NodeGetNodeRefResponse,
13214 fidl::encoding::DefaultFuchsiaResourceDialect,
13215 > {
13216 NodeProxyInterface::r#get_node_ref(self)
13217 }
13218
13219 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
13220 /// rooted at a different child token of a common parent
13221 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
13222 /// passed-in `node_ref`.
13223 ///
13224 /// This call is for assisting with admission control de-duplication, and
13225 /// with debugging.
13226 ///
13227 /// The `node_ref` must be obtained using
13228 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
13229 ///
13230 /// The `node_ref` can be a duplicated handle; it's not necessary to call
13231 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
13232 ///
13233 /// If a calling token may not actually be a valid token at all due to a
13234 /// potentially hostile/untrusted provider of the token, call
13235 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
13236 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
13237 /// never responds due to a calling token not being a real token (not really
13238 /// talking to sysmem). Another option is to call
13239 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
13240 /// which also validates the token along with converting it to a
13241 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
13242 ///
13243 /// All table fields are currently required.
13244 ///
13245 /// - response `is_alternate`
13246 /// - true: The first parent node in common between the calling node and
13247 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
13248 /// that the calling `Node` and the `node_ref` `Node` will not have both
13249 /// their constraints apply - rather sysmem will choose one or the other
13250 /// of the constraints - never both. This is because only one child of
13251 /// a `BufferCollectionTokenGroup` is selected during logical
13252 /// allocation, with only that one child's subtree contributing to
13253 /// constraints aggregation.
13254 /// - false: The first parent node in common between the calling `Node`
13255 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
13256 /// Currently, this means the first parent node in common is a
13257 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
13258 /// `Release`ed). This means that the calling `Node` and the `node_ref`
13259 /// `Node` may have both their constraints apply during constraints
13260 /// aggregation of the logical allocation, if both `Node`(s) are
13261 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
13262 /// this case, there is no `BufferCollectionTokenGroup` that will
13263 /// directly prevent the two `Node`(s) from both being selected and
13264 /// their constraints both aggregated, but even when false, one or both
13265 /// `Node`(s) may still be eliminated from consideration if one or both
13266 /// `Node`(s) has a direct or indirect parent
13267 /// `BufferCollectionTokenGroup` which selects a child subtree other
13268 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
13269 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
13270 /// associated with the same buffer collection as the calling `Node`.
13271 /// Another reason for this error is if the `node_ref` is an
13272 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
13273 /// a real `node_ref` obtained from `GetNodeRef`.
13274 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
13276 /// the needed rights expected on a real `node_ref`.
13277 /// * No other failing status codes are returned by this call. However,
13278 /// sysmem may add additional codes in future, so the client should have
13279 /// sensible default handling for any failing status code.
13280 pub fn r#is_alternate_for(
13281 &self,
13282 mut payload: NodeIsAlternateForRequest,
13283 ) -> fidl::client::QueryResponseFut<
13284 NodeIsAlternateForResult,
13285 fidl::encoding::DefaultFuchsiaResourceDialect,
13286 > {
13287 NodeProxyInterface::r#is_alternate_for(self, payload)
13288 }
13289
13290 /// Get the buffer collection ID. This ID is also available from
13291 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
13292 /// within the collection).
13293 ///
13294 /// This call is mainly useful in situations where we can't convey a
13295 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
13296 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
13297 /// handle, which can be joined back up with a `BufferCollection` client end
13298 /// that was created via a different path. Prefer to convey a
13299 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
13300 ///
13301 /// Trusting a `buffer_collection_id` value from a source other than sysmem
13302 /// is analogous to trusting a koid value from a source other than zircon.
13303 /// Both should be avoided unless really necessary, and both require
13304 /// caution. In some situations it may be reasonable to refer to a
13305 /// pre-established `BufferCollection` by `buffer_collection_id` via a
13306 /// protocol for efficiency reasons, but an incoming value purporting to be
13307 /// a `buffer_collection_id` is not sufficient alone to justify granting the
13308 /// sender of the `buffer_collection_id` any capability. The sender must
13309 /// first prove to a receiver that the sender has/had a VMO or has/had a
13310 /// `BufferCollectionToken` to the same collection by sending a handle that
13311 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
13312 /// `buffer_collection_id` value. The receiver should take care to avoid
13313 /// assuming that a sender had a `BufferCollectionToken` in cases where the
13314 /// sender has only proven that the sender had a VMO.
13315 ///
13316 /// - response `buffer_collection_id` This ID is unique per buffer
13317 /// collection per boot. Each buffer is uniquely identified by the
13318 /// `buffer_collection_id` and `buffer_index` together.
13319 pub fn r#get_buffer_collection_id(
13320 &self,
13321 ) -> fidl::client::QueryResponseFut<
13322 NodeGetBufferCollectionIdResponse,
13323 fidl::encoding::DefaultFuchsiaResourceDialect,
13324 > {
13325 NodeProxyInterface::r#get_buffer_collection_id(self)
13326 }
13327
13328 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
13329 /// created after this message to weak, which means that a client's `Node`
13330 /// client end (or a child created after this message) is not alone
13331 /// sufficient to keep allocated VMOs alive.
13332 ///
13333 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
13334 /// `close_weak_asap`.
13335 ///
13336 /// This message is only permitted before the `Node` becomes ready for
13337 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
13338 /// * `BufferCollectionToken`: any time
13339 /// * `BufferCollection`: before `SetConstraints`
13340 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
13341 ///
13342 /// Currently, no conversion from strong `Node` to weak `Node` after ready
13343 /// for allocation is provided, but a client can simulate that by creating
13344 /// an additional `Node` before allocation and setting that additional
13345 /// `Node` to weak, and then potentially at some point later sending
13346 /// `Release` and closing the client end of the client's strong `Node`, but
13347 /// keeping the client's weak `Node`.
13348 ///
13349 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
13350 /// collection failure (all `Node` client end(s) will see
13351 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
13352 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
13353 /// this situation until all `Node`(s) are ready for allocation. For initial
13354 /// allocation to succeed, at least one strong `Node` is required to exist
13355 /// at allocation time, but after that client receives VMO handles, that
13356 /// client can `BufferCollection.Release` and close the client end without
13357 /// causing this type of failure.
13358 ///
13359 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
13360 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
13361 /// separately as appropriate.
13362 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
13363 NodeProxyInterface::r#set_weak(self)
13364 }
13365
13366 /// This indicates to sysmem that the client is prepared to pay attention to
13367 /// `close_weak_asap`.
13368 ///
13369 /// If sent, this message must be before
13370 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
13371 ///
13372 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
13373 /// send this message before `WaitForAllBuffersAllocated`, or a parent
13374 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
13375 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
13376 /// trigger buffer collection failure.
13377 ///
13378 /// This message is necessary because weak sysmem VMOs have not always been
13379 /// a thing, so older clients are not aware of the need to pay attention to
13380 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
13381 /// sysmem weak VMO handles asap. By having this message and requiring
13382 /// participants to indicate their acceptance of this aspect of the overall
13383 /// protocol, we avoid situations where an older client is delivered a weak
13384 /// VMO without any way for sysmem to get that VMO to close quickly later
13385 /// (and on a per-buffer basis).
13386 ///
13387 /// A participant that doesn't handle `close_weak_asap` and also doesn't
13388 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
13389 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
13390 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
13391 /// same participant has a child/delegate which does retrieve VMOs, that
13392 /// child/delegate will need to send `SetWeakOk` before
13393 /// `WaitForAllBuffersAllocated`.
13394 ///
13395 /// + request `for_child_nodes_also` If present and true, this means direct
13396 /// child nodes of this node created after this message plus all
13397 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
13398 /// those nodes. Any child node of this node that was created before this
13399 /// message is not included. This setting is "sticky" in the sense that a
13400 /// subsequent `SetWeakOk` without this bool set to true does not reset
13401 /// the server-side bool. If this creates a problem for a participant, a
13402 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
13403 /// tokens instead, as appropriate. A participant should only set
13404 /// `for_child_nodes_also` true if the participant can really promise to
13405 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
13406 /// weak VMO handles held by participants holding the corresponding child
13407 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
13408 /// which are using sysmem(1) can be weak, despite the clients of those
13409 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
13410 /// direct way to find out about `close_weak_asap`. This only applies to
13411 /// descendents of this `Node` which are using sysmem(1), not to this
13412 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
13413 /// token, which will fail allocation unless an ancestor of this `Node`
13414 /// specified `for_child_nodes_also` true.
13415 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
13416 NodeProxyInterface::r#set_weak_ok(self, payload)
13417 }
13418
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
13421 /// reservation by a different `Node` via
13422 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
13423 ///
13424 /// The `Node` buffer counts may not be released until the entire tree of
13425 /// `Node`(s) is closed or failed, because
13426 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
13427 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
13428 /// `Node` buffer counts remain reserved until the orphaned node is later
13429 /// cleaned up.
13430 ///
13431 /// If the `Node` exceeds a fairly large number of attached eventpair server
13432 /// ends, a log message will indicate this and the `Node` (and the
13433 /// appropriate) sub-tree will fail.
13434 ///
13435 /// The `server_end` will remain open when
13436 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
13437 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
13438 /// [`fuchsia.sysmem2/BufferCollection`].
13439 ///
13440 /// This message can also be used with a
13441 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
13442 pub fn r#attach_node_tracking(
13443 &self,
13444 mut payload: NodeAttachNodeTrackingRequest,
13445 ) -> Result<(), fidl::Error> {
13446 NodeProxyInterface::r#attach_node_tracking(self, payload)
13447 }
13448}
13449
// Client-side wire implementation of the `Node` protocol for `NodeProxy`.
//
// One-way methods encode and send a single FIDL message; two-way methods send
// a query and decode the response through a local `_decode` helper. The hex
// literals are the FIDL method ordinals — the same ordinal appears in both the
// send call and the decode path, and must match the server's dispatch table.
// All messages are sent with `DynamicFlags::FLEXIBLE`, so unknown-method
// handling follows FIDL's flexible-interaction rules.
impl NodeProxyInterface for NodeProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    // Two-way `Node.Sync`: empty request, empty (flexible) response.
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the response body; `into_result` unwraps the flexible
        // envelope (surfacing framework errors such as unknown-method).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<NodeMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `Node.Release`: empty payload, no response expected.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `Node.SetName`.
    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `Node.SetDebugClientInfo`.
    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `Node.SetDebugTimeoutLogDeadline`.
    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `Node.SetVerboseLogging`: empty payload.
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `Node.GetNodeRef`: empty request, table response.
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<NodeMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `Node.IsAlternateFor`: fallible — the response envelope carries
    // either the success table or a domain `Error`.
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<NodeMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `Node.GetBufferCollectionId`: empty request, table response.
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<NodeMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `Node.SetWeak`: empty payload.
    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `Node.SetWeakOk`; takes the payload by value (resource type).
    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `Node.AttachNodeTracking`; takes the payload by value
    // (resource type).
    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
13624
/// Stream of events arriving on a `fuchsia.sysmem2/Node` channel.
pub struct NodeEventStream {
    // Yields raw message buffers which `NodeEvent::decode` interprets.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
13628
// Explicitly `Unpin`: `poll_next` only needs `&mut` access to the receiver,
// so callers may poll this stream without pinning ceremony.
impl std::marker::Unpin for NodeEventStream {}
13630
impl futures::stream::FusedStream for NodeEventStream {
    /// Delegates to the underlying receiver: once it is terminated, this
    /// stream will never yield another event.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
13636
13637impl futures::Stream for NodeEventStream {
13638 type Item = Result<NodeEvent, fidl::Error>;
13639
13640 fn poll_next(
13641 mut self: std::pin::Pin<&mut Self>,
13642 cx: &mut std::task::Context<'_>,
13643 ) -> std::task::Poll<Option<Self::Item>> {
13644 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
13645 &mut self.event_receiver,
13646 cx
13647 )?) {
13648 Some(buf) => std::task::Poll::Ready(Some(NodeEvent::decode(buf))),
13649 None => std::task::Poll::Ready(None),
13650 }
13651 }
13652}
13653
/// An event received on a `fuchsia.sysmem2/Node` channel. This protocol
/// defines no known events, so only unknown (flexible) ordinals are
/// representable.
#[derive(Debug)]
pub enum NodeEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
13662
impl NodeEvent {
    /// Decodes a message buffer as a [`NodeEvent`].
    ///
    /// Any message whose header carries the FLEXIBLE dynamic flag is
    /// surfaced as `_UnknownEvent`; a strict unknown ordinal is a protocol
    /// error (`fidl::Error::UnknownOrdinal`).
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<NodeEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events are unsolicited, so the transaction id is expected to be 0.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(NodeEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
13682
/// A Stream of incoming requests for fuchsia.sysmem2/Node.
pub struct NodeRequestStream {
    // Serving state (channel plus shutdown bookkeeping), shared with any
    // `NodeControlHandle`s created from this stream.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // True once the stream has yielded `None`; polling after that panics.
    is_terminated: bool,
}
13688
// Explicitly `Unpin`: `poll_next` only needs `&mut` access to plain fields,
// so callers may poll this stream without pinning ceremony.
impl std::marker::Unpin for NodeRequestStream {}
13690
impl futures::stream::FusedStream for NodeRequestStream {
    /// Reports whether `poll_next` has already yielded `None`.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
13696
13697impl fidl::endpoints::RequestStream for NodeRequestStream {
13698 type Protocol = NodeMarker;
13699 type ControlHandle = NodeControlHandle;
13700
13701 fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
13702 Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
13703 }
13704
13705 fn control_handle(&self) -> Self::ControlHandle {
13706 NodeControlHandle { inner: self.inner.clone() }
13707 }
13708
13709 fn into_inner(
13710 self,
13711 ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
13712 {
13713 (self.inner, self.is_terminated)
13714 }
13715
13716 fn from_inner(
13717 inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
13718 is_terminated: bool,
13719 ) -> Self {
13720 Self { inner, is_terminated }
13721 }
13722}
13723
impl futures::Stream for NodeRequestStream {
    type Item = Result<NodeRequest, fidl::Error>;

    /// Reads the next message from the channel, decodes its request payload
    /// by method ordinal, and yields it as a typed [`NodeRequest`].
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // If serving has been shut down, end the stream.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        // Polling a fused stream after it returned `None` is a caller bug.
        if this.is_terminated {
            panic!("polled NodeRequestStream after completion");
        }
        // Borrow thread-local byte/handle scratch buffers for the read+decode.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure terminates the stream cleanly rather than
                    // surfacing an error item.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel; split off the
                // transaction header and dispatch on the method ordinal.
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Node.Sync (two-way): empty payload, carries a responder.
                    0x11ac2555cf575b54 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::Sync {
                            responder: NodeSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Node.Release (one-way).
                    0x6a5cae7d6d6e04c6 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::Release { control_handle })
                    }
                    // Node.SetName (one-way).
                    0xb41f1624f48c1e9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetNameRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetName { payload: req, control_handle })
                    }
                    // Node.SetDebugClientInfo (one-way).
                    0x5cde8914608d99b1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetDebugClientInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetDebugClientInfo { payload: req, control_handle })
                    }
                    // Node.SetDebugTimeoutLogDeadline (one-way).
                    0x716b0af13d5c0806 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetDebugTimeoutLogDeadlineRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetDebugTimeoutLogDeadline { payload: req, control_handle })
                    }
                    // Node.SetVerboseLogging (one-way).
                    0x5209c77415b4dfad => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetVerboseLogging { control_handle })
                    }
                    // Node.GetNodeRef (two-way).
                    0x5b3d0e51614df053 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::GetNodeRef {
                            responder: NodeGetNodeRefResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Node.IsAlternateFor (two-way, with request payload).
                    0x3a58e00157e0825 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            NodeIsAlternateForRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::IsAlternateFor {
                            payload: req,
                            responder: NodeIsAlternateForResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Node.GetBufferCollectionId (two-way).
                    0x77d19a494b78ba8c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::GetBufferCollectionId {
                            responder: NodeGetBufferCollectionIdResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Node.SetWeak (one-way).
                    0x22dd3ea514eeffe1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetWeak { control_handle })
                    }
                    // Node.SetWeakOk (one-way).
                    0x38a44fc4d7724be9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetWeakOkRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetWeakOk { payload: req, control_handle })
                    }
                    // Node.AttachNodeTracking (one-way).
                    0x3f22f2a293d3cdac => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeAttachNodeTrackingRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::AttachNodeTracking { payload: req, control_handle })
                    }
                    // Unknown flexible one-way method (tx_id == 0): surface it
                    // to the server as `_UnknownMethod` without replying.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(NodeRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: NodeControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with the
                    // framework's UnknownMethod error, then surface it.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(NodeRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: NodeControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
13937
13938/// This protocol is the parent protocol for all nodes in the tree established
13939/// by [`fuchsia.sysmem2/BufferCollectionToken`] creation and
13940/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] creation, including
13941/// [`fuchsia.sysmem2/BufferCollectionToken`](s) which have since been converted
13942/// to a [`fuchsia.sysmem2/BufferCollection`] channel.
13943///
13944/// Epitaphs are not used in this protocol.
13945#[derive(Debug)]
13946pub enum NodeRequest {
13947 /// Ensure that previous messages have been received server side. This is
13948 /// particularly useful after previous messages that created new tokens,
13949 /// because a token must be known to the sysmem server before sending the
13950 /// token to another participant.
13951 ///
13952 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
13953 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
13954 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
13955 /// to mitigate the possibility of a hostile/fake
13956 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
13957 /// Another way is to pass the token to
13958 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
13959 /// the token as part of exchanging it for a
13960 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
13961 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
13962 /// of stalling.
13963 ///
13964 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
13965 /// and then starting and completing a `Sync`, it's then safe to send the
13966 /// `BufferCollectionToken` client ends to other participants knowing the
13967 /// server will recognize the tokens when they're sent by the other
13968 /// participants to sysmem in a
13969 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
13970 /// efficient way to create tokens while avoiding unnecessary round trips.
13971 ///
13972 /// Other options include waiting for each
13973 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
13974 /// individually (using separate call to `Sync` after each), or calling
13975 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
13976 /// converted to a `BufferCollection` via
13977 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
13978 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
13979 /// the sync step and can create multiple tokens at once.
13980 Sync { responder: NodeSyncResponder },
13981 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
13982 ///
13983 /// Normally a participant will convert a `BufferCollectionToken` into a
13984 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
13985 /// `Release` via the token (and then close the channel immediately or
13986 /// shortly later in response to server closing the server end), which
13987 /// avoids causing buffer collection failure. Without a prior `Release`,
13988 /// closing the `BufferCollectionToken` client end will cause buffer
13989 /// collection failure.
13990 ///
13991 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
13992 ///
13993 /// By default the server handles unexpected closure of a
13994 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
13995 /// first) by failing the buffer collection. Partly this is to expedite
13996 /// closing VMO handles to reclaim memory when any participant fails. If a
13997 /// participant would like to cleanly close a `BufferCollection` without
13998 /// causing buffer collection failure, the participant can send `Release`
13999 /// before closing the `BufferCollection` client end. The `Release` can
14000 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
14001 /// buffer collection won't require constraints from this node in order to
14002 /// allocate. If after `SetConstraints`, the constraints are retained and
14003 /// aggregated, despite the lack of `BufferCollection` connection at the
14004 /// time of constraints aggregation.
14005 ///
14006 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
14007 ///
14008 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
14009 /// end (without `Release` first) will trigger failure of the buffer
14010 /// collection. To close a `BufferCollectionTokenGroup` channel without
14011 /// failing the buffer collection, ensure that AllChildrenPresent() has been
14012 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
14013 /// client end.
14014 ///
14015 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
14017 /// buffer collection will fail (triggered by reception of `Release` without
14018 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
14019 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
14020 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
14021 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
14022 /// close requires `AllChildrenPresent` (if not already sent), then
14023 /// `Release`, then close client end.
14024 ///
14025 /// If `Release` occurs after `AllChildrenPresent`, the children and all
14026 /// their constraints remain intact (just as they would if the
14027 /// `BufferCollectionTokenGroup` channel had remained open), and the client
14028 /// end close doesn't trigger buffer collection failure.
14029 ///
14030 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
14031 ///
14032 /// For brevity, the per-channel-protocol paragraphs above ignore the
14033 /// separate failure domain created by
14034 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
14035 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
14036 /// unexpectedly closes (without `Release` first) and that client end is
14037 /// under a failure domain, instead of failing the whole buffer collection,
14038 /// the failure domain is failed, but the buffer collection itself is
14039 /// isolated from failure of the failure domain. Such failure domains can be
14040 /// nested, in which case only the inner-most failure domain in which the
14041 /// `Node` resides fails.
14042 Release { control_handle: NodeControlHandle },
14043 /// Set a name for VMOs in this buffer collection.
14044 ///
14045 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
14046 /// will be truncated to fit. The name of the vmo will be suffixed with the
14047 /// buffer index within the collection (if the suffix fits within
14048 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
14049 /// listed in the inspect data.
14050 ///
14051 /// The name only affects VMOs allocated after the name is set; this call
14052 /// does not rename existing VMOs. If multiple clients set different names
14053 /// then the larger priority value will win. Setting a new name with the
14054 /// same priority as a prior name doesn't change the name.
14055 ///
14056 /// All table fields are currently required.
14057 ///
14058 /// + request `priority` The name is only set if this is the first `SetName`
14059 /// or if `priority` is greater than any previous `priority` value in
14060 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
14061 /// + request `name` The name for VMOs created under this buffer collection.
14062 SetName { payload: NodeSetNameRequest, control_handle: NodeControlHandle },
14063 /// Set information about the current client that can be used by sysmem to
14064 /// help diagnose leaking memory and allocation stalls waiting for a
14065 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
14066 ///
14067 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
14068 /// `Node`(s) derived from this `Node`, unless overriden by
14069 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
14070 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
14071 ///
14072 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
14073 /// `Allocator` is the most efficient way to ensure that all
14074 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
14075 /// set, and is also more efficient than separately sending the same debug
14076 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
14077 /// created [`fuchsia.sysmem2/Node`].
14078 ///
14079 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
14080 /// indicate which client is closing their channel first, leading to subtree
14081 /// failure (which can be normal if the purpose of the subtree is over, but
14082 /// if happening earlier than expected, the client-channel-specific name can
14083 /// help diagnose where the failure is first coming from, from sysmem's
14084 /// point of view).
14085 ///
14086 /// All table fields are currently required.
14087 ///
14088 /// + request `name` This can be an arbitrary string, but the current
14089 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
14090 /// + request `id` This can be an arbitrary id, but the current process ID
14091 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
14092 SetDebugClientInfo { payload: NodeSetDebugClientInfoRequest, control_handle: NodeControlHandle },
14093 /// Sysmem logs a warning if sysmem hasn't seen
14094 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
14095 /// within 5 seconds after creation of a new collection.
14096 ///
14097 /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
14099 /// take effect.
14100 ///
14101 /// In most cases the default works well.
14102 ///
14103 /// All table fields are currently required.
14104 ///
14105 /// + request `deadline` The time at which sysmem will start trying to log
14106 /// the warning, unless all constraints are with sysmem by then.
14107 SetDebugTimeoutLogDeadline {
14108 payload: NodeSetDebugTimeoutLogDeadlineRequest,
14109 control_handle: NodeControlHandle,
14110 },
14111 /// This enables verbose logging for the buffer collection.
14112 ///
14113 /// Verbose logging includes constraints set via
14114 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
14115 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
14116 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
14117 /// the tree of `Node`(s).
14118 ///
14119 /// Normally sysmem prints only a single line complaint when aggregation
14120 /// fails, with just the specific detailed reason that aggregation failed,
14121 /// with little surrounding context. While this is often enough to diagnose
14122 /// a problem if only a small change was made and everything was working
14123 /// before the small change, it's often not particularly helpful for getting
14124 /// a new buffer collection to work for the first time. Especially with
14125 /// more complex trees of nodes, involving things like
14126 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
14127 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
14128 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
14129 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
14130 /// looks like and why it's failing a logical allocation, or why a tree or
14131 /// subtree is failing sooner than expected.
14132 ///
14133 /// The intent of the extra logging is to be acceptable from a performance
14134 /// point of view, under the assumption that verbose logging is only enabled
14135 /// on a low number of buffer collections. If we're not tracking down a bug,
14136 /// we shouldn't send this message.
14137 SetVerboseLogging { control_handle: NodeControlHandle },
14138 /// This gets a handle that can be used as a parameter to
14139 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
14140 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
14141 /// client obtained this handle from this `Node`.
14142 ///
14143 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
14144 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
14145 /// despite the two calls typically being on different channels.
14146 ///
14147 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
14148 ///
14149 /// All table fields are currently required.
14150 ///
14151 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
14152 /// different `Node` channel, to prove that the client obtained the handle
14153 /// from this `Node`.
14154 GetNodeRef { responder: NodeGetNodeRefResponder },
14155 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
14156 /// rooted at a different child token of a common parent
14157 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
14158 /// passed-in `node_ref`.
14159 ///
14160 /// This call is for assisting with admission control de-duplication, and
14161 /// with debugging.
14162 ///
14163 /// The `node_ref` must be obtained using
14164 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
14165 ///
14166 /// The `node_ref` can be a duplicated handle; it's not necessary to call
14167 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
14168 ///
14169 /// If a calling token may not actually be a valid token at all due to a
14170 /// potentially hostile/untrusted provider of the token, call
14171 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
14172 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
14173 /// never responds due to a calling token not being a real token (not really
14174 /// talking to sysmem). Another option is to call
14175 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
14176 /// which also validates the token along with converting it to a
14177 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
14178 ///
14179 /// All table fields are currently required.
14180 ///
14181 /// - response `is_alternate`
14182 /// - true: The first parent node in common between the calling node and
14183 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
14184 /// that the calling `Node` and the `node_ref` `Node` will not have both
14185 /// their constraints apply - rather sysmem will choose one or the other
14186 /// of the constraints - never both. This is because only one child of
14187 /// a `BufferCollectionTokenGroup` is selected during logical
14188 /// allocation, with only that one child's subtree contributing to
14189 /// constraints aggregation.
14190 /// - false: The first parent node in common between the calling `Node`
14191 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
14192 /// Currently, this means the first parent node in common is a
14193 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
14194 /// `Release`ed). This means that the calling `Node` and the `node_ref`
14195 /// `Node` may have both their constraints apply during constraints
14196 /// aggregation of the logical allocation, if both `Node`(s) are
14197 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
14198 /// this case, there is no `BufferCollectionTokenGroup` that will
14199 /// directly prevent the two `Node`(s) from both being selected and
14200 /// their constraints both aggregated, but even when false, one or both
14201 /// `Node`(s) may still be eliminated from consideration if one or both
14202 /// `Node`(s) has a direct or indirect parent
14203 /// `BufferCollectionTokenGroup` which selects a child subtree other
14204 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
14205 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
14206 /// associated with the same buffer collection as the calling `Node`.
14207 /// Another reason for this error is if the `node_ref` is an
14208 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
14209 /// a real `node_ref` obtained from `GetNodeRef`.
14210 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
14211 /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle , or doesn't have
14212 /// the needed rights expected on a real `node_ref`.
14213 /// * No other failing status codes are returned by this call. However,
14214 /// sysmem may add additional codes in future, so the client should have
14215 /// sensible default handling for any failing status code.
14216 IsAlternateFor { payload: NodeIsAlternateForRequest, responder: NodeIsAlternateForResponder },
14217 /// Get the buffer collection ID. This ID is also available from
14218 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
14219 /// within the collection).
14220 ///
14221 /// This call is mainly useful in situations where we can't convey a
14222 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
14223 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
14224 /// handle, which can be joined back up with a `BufferCollection` client end
14225 /// that was created via a different path. Prefer to convey a
14226 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
14227 ///
14228 /// Trusting a `buffer_collection_id` value from a source other than sysmem
14229 /// is analogous to trusting a koid value from a source other than zircon.
14230 /// Both should be avoided unless really necessary, and both require
14231 /// caution. In some situations it may be reasonable to refer to a
14232 /// pre-established `BufferCollection` by `buffer_collection_id` via a
14233 /// protocol for efficiency reasons, but an incoming value purporting to be
14234 /// a `buffer_collection_id` is not sufficient alone to justify granting the
14235 /// sender of the `buffer_collection_id` any capability. The sender must
14236 /// first prove to a receiver that the sender has/had a VMO or has/had a
14237 /// `BufferCollectionToken` to the same collection by sending a handle that
14238 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
14239 /// `buffer_collection_id` value. The receiver should take care to avoid
14240 /// assuming that a sender had a `BufferCollectionToken` in cases where the
14241 /// sender has only proven that the sender had a VMO.
14242 ///
14243 /// - response `buffer_collection_id` This ID is unique per buffer
14244 /// collection per boot. Each buffer is uniquely identified by the
14245 /// `buffer_collection_id` and `buffer_index` together.
14246 GetBufferCollectionId { responder: NodeGetBufferCollectionIdResponder },
14247 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
14248 /// created after this message to weak, which means that a client's `Node`
14249 /// client end (or a child created after this message) is not alone
14250 /// sufficient to keep allocated VMOs alive.
14251 ///
14252 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
14253 /// `close_weak_asap`.
14254 ///
14255 /// This message is only permitted before the `Node` becomes ready for
14256 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
14257 /// * `BufferCollectionToken`: any time
14258 /// * `BufferCollection`: before `SetConstraints`
14259 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
14260 ///
14261 /// Currently, no conversion from strong `Node` to weak `Node` after ready
14262 /// for allocation is provided, but a client can simulate that by creating
14263 /// an additional `Node` before allocation and setting that additional
14264 /// `Node` to weak, and then potentially at some point later sending
14265 /// `Release` and closing the client end of the client's strong `Node`, but
14266 /// keeping the client's weak `Node`.
14267 ///
14268 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
14269 /// collection failure (all `Node` client end(s) will see
14270 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
14271 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
14272 /// this situation until all `Node`(s) are ready for allocation. For initial
14273 /// allocation to succeed, at least one strong `Node` is required to exist
14274 /// at allocation time, but after that client receives VMO handles, that
14275 /// client can `BufferCollection.Release` and close the client end without
14276 /// causing this type of failure.
14277 ///
14278 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
14279 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
14280 /// separately as appropriate.
14281 SetWeak { control_handle: NodeControlHandle },
14282 /// This indicates to sysmem that the client is prepared to pay attention to
14283 /// `close_weak_asap`.
14284 ///
14285 /// If sent, this message must be before
14286 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
14287 ///
14288 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
14289 /// send this message before `WaitForAllBuffersAllocated`, or a parent
14290 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
14291 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
14292 /// trigger buffer collection failure.
14293 ///
14294 /// This message is necessary because weak sysmem VMOs have not always been
14295 /// a thing, so older clients are not aware of the need to pay attention to
14296 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
14297 /// sysmem weak VMO handles asap. By having this message and requiring
14298 /// participants to indicate their acceptance of this aspect of the overall
14299 /// protocol, we avoid situations where an older client is delivered a weak
14300 /// VMO without any way for sysmem to get that VMO to close quickly later
14301 /// (and on a per-buffer basis).
14302 ///
14303 /// A participant that doesn't handle `close_weak_asap` and also doesn't
14304 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
14305 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
14306 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
14307 /// same participant has a child/delegate which does retrieve VMOs, that
14308 /// child/delegate will need to send `SetWeakOk` before
14309 /// `WaitForAllBuffersAllocated`.
14310 ///
14311 /// + request `for_child_nodes_also` If present and true, this means direct
14312 /// child nodes of this node created after this message plus all
14313 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
14314 /// those nodes. Any child node of this node that was created before this
14315 /// message is not included. This setting is "sticky" in the sense that a
14316 /// subsequent `SetWeakOk` without this bool set to true does not reset
14317 /// the server-side bool. If this creates a problem for a participant, a
14318 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
14319 /// tokens instead, as appropriate. A participant should only set
14320 /// `for_child_nodes_also` true if the participant can really promise to
14321 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
14322 /// weak VMO handles held by participants holding the corresponding child
14323 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
14324 /// which are using sysmem(1) can be weak, despite the clients of those
14325 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
14326 /// direct way to find out about `close_weak_asap`. This only applies to
14327 /// descendents of this `Node` which are using sysmem(1), not to this
14328 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
14329 /// token, which will fail allocation unless an ancestor of this `Node`
14330 /// specified `for_child_nodes_also` true.
14331 SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: NodeControlHandle },
14332 /// The server_end will be closed after this `Node` and any child nodes have
    /// released their buffer counts, making those counts available for
14334 /// reservation by a different `Node` via
14335 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
14336 ///
14337 /// The `Node` buffer counts may not be released until the entire tree of
14338 /// `Node`(s) is closed or failed, because
14339 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
14340 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
14341 /// `Node` buffer counts remain reserved until the orphaned node is later
14342 /// cleaned up.
14343 ///
14344 /// If the `Node` exceeds a fairly large number of attached eventpair server
14345 /// ends, a log message will indicate this and the `Node` (and the
14346 /// appropriate) sub-tree will fail.
14347 ///
14348 /// The `server_end` will remain open when
14349 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
14350 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
14351 /// [`fuchsia.sysmem2/BufferCollection`].
14352 ///
14353 /// This message can also be used with a
14354 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
14355 AttachNodeTracking { payload: NodeAttachNodeTrackingRequest, control_handle: NodeControlHandle },
14356 /// An interaction was received which does not match any known method.
14357 #[non_exhaustive]
14358 _UnknownMethod {
14359 /// Ordinal of the method that was called.
14360 ordinal: u64,
14361 control_handle: NodeControlHandle,
14362 method_type: fidl::MethodType,
14363 },
14364}
14365
14366impl NodeRequest {
14367 #[allow(irrefutable_let_patterns)]
14368 pub fn into_sync(self) -> Option<(NodeSyncResponder)> {
14369 if let NodeRequest::Sync { responder } = self { Some((responder)) } else { None }
14370 }
14371
14372 #[allow(irrefutable_let_patterns)]
14373 pub fn into_release(self) -> Option<(NodeControlHandle)> {
14374 if let NodeRequest::Release { control_handle } = self {
14375 Some((control_handle))
14376 } else {
14377 None
14378 }
14379 }
14380
14381 #[allow(irrefutable_let_patterns)]
14382 pub fn into_set_name(self) -> Option<(NodeSetNameRequest, NodeControlHandle)> {
14383 if let NodeRequest::SetName { payload, control_handle } = self {
14384 Some((payload, control_handle))
14385 } else {
14386 None
14387 }
14388 }
14389
14390 #[allow(irrefutable_let_patterns)]
14391 pub fn into_set_debug_client_info(
14392 self,
14393 ) -> Option<(NodeSetDebugClientInfoRequest, NodeControlHandle)> {
14394 if let NodeRequest::SetDebugClientInfo { payload, control_handle } = self {
14395 Some((payload, control_handle))
14396 } else {
14397 None
14398 }
14399 }
14400
14401 #[allow(irrefutable_let_patterns)]
14402 pub fn into_set_debug_timeout_log_deadline(
14403 self,
14404 ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, NodeControlHandle)> {
14405 if let NodeRequest::SetDebugTimeoutLogDeadline { payload, control_handle } = self {
14406 Some((payload, control_handle))
14407 } else {
14408 None
14409 }
14410 }
14411
14412 #[allow(irrefutable_let_patterns)]
14413 pub fn into_set_verbose_logging(self) -> Option<(NodeControlHandle)> {
14414 if let NodeRequest::SetVerboseLogging { control_handle } = self {
14415 Some((control_handle))
14416 } else {
14417 None
14418 }
14419 }
14420
14421 #[allow(irrefutable_let_patterns)]
14422 pub fn into_get_node_ref(self) -> Option<(NodeGetNodeRefResponder)> {
14423 if let NodeRequest::GetNodeRef { responder } = self { Some((responder)) } else { None }
14424 }
14425
14426 #[allow(irrefutable_let_patterns)]
14427 pub fn into_is_alternate_for(
14428 self,
14429 ) -> Option<(NodeIsAlternateForRequest, NodeIsAlternateForResponder)> {
14430 if let NodeRequest::IsAlternateFor { payload, responder } = self {
14431 Some((payload, responder))
14432 } else {
14433 None
14434 }
14435 }
14436
14437 #[allow(irrefutable_let_patterns)]
14438 pub fn into_get_buffer_collection_id(self) -> Option<(NodeGetBufferCollectionIdResponder)> {
14439 if let NodeRequest::GetBufferCollectionId { responder } = self {
14440 Some((responder))
14441 } else {
14442 None
14443 }
14444 }
14445
14446 #[allow(irrefutable_let_patterns)]
14447 pub fn into_set_weak(self) -> Option<(NodeControlHandle)> {
14448 if let NodeRequest::SetWeak { control_handle } = self {
14449 Some((control_handle))
14450 } else {
14451 None
14452 }
14453 }
14454
14455 #[allow(irrefutable_let_patterns)]
14456 pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, NodeControlHandle)> {
14457 if let NodeRequest::SetWeakOk { payload, control_handle } = self {
14458 Some((payload, control_handle))
14459 } else {
14460 None
14461 }
14462 }
14463
14464 #[allow(irrefutable_let_patterns)]
14465 pub fn into_attach_node_tracking(
14466 self,
14467 ) -> Option<(NodeAttachNodeTrackingRequest, NodeControlHandle)> {
14468 if let NodeRequest::AttachNodeTracking { payload, control_handle } = self {
14469 Some((payload, control_handle))
14470 } else {
14471 None
14472 }
14473 }
14474
14475 /// Name of the method defined in FIDL
14476 pub fn method_name(&self) -> &'static str {
14477 match *self {
14478 NodeRequest::Sync { .. } => "sync",
14479 NodeRequest::Release { .. } => "release",
14480 NodeRequest::SetName { .. } => "set_name",
14481 NodeRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
14482 NodeRequest::SetDebugTimeoutLogDeadline { .. } => "set_debug_timeout_log_deadline",
14483 NodeRequest::SetVerboseLogging { .. } => "set_verbose_logging",
14484 NodeRequest::GetNodeRef { .. } => "get_node_ref",
14485 NodeRequest::IsAlternateFor { .. } => "is_alternate_for",
14486 NodeRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
14487 NodeRequest::SetWeak { .. } => "set_weak",
14488 NodeRequest::SetWeakOk { .. } => "set_weak_ok",
14489 NodeRequest::AttachNodeTracking { .. } => "attach_node_tracking",
14490 NodeRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
14491 "unknown one-way method"
14492 }
14493 NodeRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
14494 "unknown two-way method"
14495 }
14496 }
14497 }
14498}
14499
#[derive(Debug, Clone)]
pub struct NodeControlHandle {
    // Shared serve state for the channel; `Clone` only bumps the `Arc`, so
    // every clone controls the same underlying channel.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
14504
impl fidl::endpoints::ControlHandle for NodeControlHandle {
    /// Shuts down the served channel; delegates to the shared serve state.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    /// Shuts down the served channel with `status` as the epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    /// Reports whether the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    /// Returns a signals handle for observing closure of the channel.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    /// Signals the peer endpoint of the channel (Fuchsia targets only).
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        // `Peered` provides `signal_peer` on the channel type.
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
14531
// NOTE(review): intentionally empty — fidlgen emits event-sender helpers in
// this inherent impl, and presumably `Node` declares no events; confirm
// against the FIDL protocol definition.
impl NodeControlHandle {}
14533
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeSyncResponder {
    // Wrapped in `ManuallyDrop` so `drop_without_shutdown` can release the
    // handle without running this type's `Drop` (which shuts the channel down).
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
14540
/// Set the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeSyncResponder {
    fn drop(&mut self) {
        // Close the channel so the waiting client observes closure rather
        // than hanging on a response that will never come.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14551
impl fidl::endpoints::Responder for NodeSyncResponder {
    type ControlHandle = NodeControlHandle;

    /// Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the channel shutdown that
    /// `Drop` would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14566
14567impl NodeSyncResponder {
14568 /// Sends a response to the FIDL transaction.
14569 ///
14570 /// Sets the channel to shutdown if an error occurs.
14571 pub fn send(self) -> Result<(), fidl::Error> {
14572 let _result = self.send_raw();
14573 if _result.is_err() {
14574 self.control_handle.shutdown();
14575 }
14576 self.drop_without_shutdown();
14577 _result
14578 }
14579
14580 /// Similar to "send" but does not shutdown the channel if an error occurs.
14581 pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
14582 let _result = self.send_raw();
14583 self.drop_without_shutdown();
14584 _result
14585 }
14586
14587 fn send_raw(&self) -> Result<(), fidl::Error> {
14588 self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
14589 fidl::encoding::Flexible::new(()),
14590 self.tx_id,
14591 0x11ac2555cf575b54,
14592 fidl::encoding::DynamicFlags::FLEXIBLE,
14593 )
14594 }
14595}
14596
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeGetNodeRefResponder {
    // Wrapped in `ManuallyDrop` so `drop_without_shutdown` can release the
    // handle without running this type's `Drop` (which shuts the channel down).
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
14603
/// Set the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeGetNodeRefResponder {
    fn drop(&mut self) {
        // Close the channel so the waiting client observes closure rather
        // than hanging on a response that will never come.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14614
impl fidl::endpoints::Responder for NodeGetNodeRefResponder {
    type ControlHandle = NodeControlHandle;

    /// Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the channel shutdown that
    /// `Drop` would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14629
impl NodeGetNodeRefResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut payload: NodeGetNodeRefResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the response onto the channel, tagged with the
    /// method ordinal and FLEXIBLE dynamic flags (flexible interaction).
    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
            fidl::encoding::Flexible::new(&mut payload),
            self.tx_id,
            0x5b3d0e51614df053, // ordinal of `Node.GetNodeRef`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
14662
/// Responder for the `Node.IsAlternateFor` two-way method: pairs the request's
/// transaction id with a control handle so that exactly one response can be
/// written back on the channel for that transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeIsAlternateForResponder {
    // Wrapped in ManuallyDrop so drop_without_shutdown() can release the
    // handle without running this type's Drop (which shuts down the channel).
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
14669
/// Set the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14680
impl fidl::endpoints::Responder for NodeIsAlternateForResponder {
    type ControlHandle = NodeControlHandle;

    /// Borrows the control handle for the channel this responder answers on.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel, unlike
    /// `Drop` (which shuts it down so the client doesn't hang).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14695
impl NodeIsAlternateForResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the success-or-domain-error response onto the
    /// channel, tagged with the method ordinal and FLEXIBLE dynamic flags.
    fn send_raw(
        &self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            NodeIsAlternateForResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x3a58e00157e0825, // ordinal of `Node.IsAlternateFor`
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
14737
/// Responder for the `Node.GetBufferCollectionId` two-way method: pairs the
/// request's transaction id with a control handle so that exactly one response
/// can be written back on the channel for that transaction.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeGetBufferCollectionIdResponder {
    // Wrapped in ManuallyDrop so drop_without_shutdown() can release the
    // handle without running this type's Drop (which shuts down the channel).
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
14744
/// Set the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14755
impl fidl::endpoints::Responder for NodeGetBufferCollectionIdResponder {
    type ControlHandle = NodeControlHandle;

    /// Borrows the control handle for the channel this responder answers on.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel, unlike
    /// `Drop` (which shuts it down so the client doesn't hang).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14770
impl NodeGetBufferCollectionIdResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut payload: &NodeGetBufferCollectionIdResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the response onto the channel, tagged with the
    /// method ordinal and FLEXIBLE dynamic flags (flexible interaction).
    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
        self.control_handle
            .inner
            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
                fidl::encoding::Flexible::new(payload),
                self.tx_id,
                0x77d19a494b78ba8c, // ordinal of `Node.GetBufferCollectionId`
                fidl::encoding::DynamicFlags::FLEXIBLE,
            )
    }
}
14805
/// Marker type identifying the `fuchsia.sysmem2/SecureMem` protocol; carries
/// no data and exists only to select the protocol's proxy/stream types.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct SecureMemMarker;
14808
impl fidl::endpoints::ProtocolMarker for SecureMemMarker {
    type Proxy = SecureMemProxy;
    type RequestStream = SecureMemRequestStream;
    // The synchronous proxy only exists on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = SecureMemSynchronousProxy;

    // NOTE(review): "(anonymous)" presumably means the protocol has no
    // discoverable service name — confirm against the FIDL definition.
    const DEBUG_NAME: &'static str = "(anonymous) SecureMem";
}
// Result aliases for the SecureMem two-way methods: each pairs the method's
// response payload (or unit, for methods with empty success responses) with
// the sysmem2 `Error` domain-error type.
pub type SecureMemGetPhysicalSecureHeapsResult =
    Result<SecureMemGetPhysicalSecureHeapsResponse, Error>;
pub type SecureMemGetDynamicSecureHeapsResult =
    Result<SecureMemGetDynamicSecureHeapsResponse, Error>;
pub type SecureMemGetPhysicalSecureHeapPropertiesResult =
    Result<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>;
pub type SecureMemAddSecureHeapPhysicalRangeResult = Result<(), Error>;
pub type SecureMemDeleteSecureHeapPhysicalRangeResult = Result<(), Error>;
pub type SecureMemModifySecureHeapPhysicalRangeResult = Result<(), Error>;
pub type SecureMemZeroSubRangeResult = Result<(), Error>;
14827
/// Client-side interface for the `fuchsia.sysmem2/SecureMem` protocol.
///
/// Implemented by [`SecureMemProxy`]; each two-way method returns its
/// associated `…ResponseFut` future, resolving to the method's result alias.
pub trait SecureMemProxyInterface: Send + Sync {
    type GetPhysicalSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error>>
        + Send;
    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut;
    type GetDynamicSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error>>
        + Send;
    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut;
    type GetPhysicalSecureHeapPropertiesResponseFut: std::future::Future<
            Output = Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error>,
        > + Send;
    fn r#get_physical_secure_heap_properties(
        &self,
        payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut;
    type AddSecureHeapPhysicalRangeResponseFut: std::future::Future<Output = Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error>>
        + Send;
    fn r#add_secure_heap_physical_range(
        &self,
        payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> Self::AddSecureHeapPhysicalRangeResponseFut;
    type DeleteSecureHeapPhysicalRangeResponseFut: std::future::Future<
            Output = Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error>,
        > + Send;
    fn r#delete_secure_heap_physical_range(
        &self,
        payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut;
    type ModifySecureHeapPhysicalRangeResponseFut: std::future::Future<
            Output = Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error>,
        > + Send;
    fn r#modify_secure_heap_physical_range(
        &self,
        payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut;
    type ZeroSubRangeResponseFut: std::future::Future<Output = Result<SecureMemZeroSubRangeResult, fidl::Error>>
        + Send;
    fn r#zero_sub_range(
        &self,
        payload: &SecureMemZeroSubRangeRequest,
    ) -> Self::ZeroSubRangeResponseFut;
}
/// Blocking (synchronous) client for `fuchsia.sysmem2/SecureMem`; only
/// available on Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct SecureMemSynchronousProxy {
    // Synchronous FIDL client that owns the underlying channel.
    client: fidl::client::sync::Client,
}
14874
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for SecureMemSynchronousProxy {
    type Proxy = SecureMemProxy;
    type Protocol = SecureMemMarker;

    /// Wraps a raw channel in this synchronous proxy.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    /// Consumes the proxy, returning the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
14892
#[cfg(target_os = "fuchsia")]
impl SecureMemSynchronousProxy {
    /// Creates a synchronous proxy that speaks `SecureMem` over `channel`.
    pub fn new(channel: fidl::Channel) -> Self {
        Self { client: fidl::client::sync::Client::new(channel) }
    }

    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemEvent, fidl::Error> {
        SecureMemEvent::decode(self.client.wait_for_event::<SecureMemMarker>(deadline)?)
    }

    /// Gets the physical address and length of any secure heap whose physical
    /// range is configured via the TEE.
    ///
    /// Presently, these will be fixed physical addresses and lengths, with the
    /// location plumbed via the TEE.
    ///
    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
    /// when there isn't any special heap-specific per-VMO setup or teardown
    /// required.
    ///
    /// The physical range must be secured/protected by the TEE before the
    /// securemem driver responds to this request with success.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    /// * PROTOCOL_DEVIATION - called more than once.
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * other errors are allowed; any other errors should be treated the same
    ///   as UNSPECIFIED.
    pub fn r#get_physical_secure_heaps(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
            SecureMemMarker,
        >(
            (),
            0x38716300592073e3,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
        // Identity map: the decoded result already matches the public alias;
        // the map keeps the generated code shape uniform across methods.
        Ok(_response.map(|x| x))
    }

    /// Gets information about any secure heaps whose physical pages are not
    /// configured by the TEE, but by sysmem.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    /// * PROTOCOL_DEVIATION - called more than once.
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * other errors are allowed; any other errors should be treated the same
    ///   as UNSPECIFIED.
    pub fn r#get_dynamic_secure_heaps(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
            SecureMemMarker,
        >(
            (),
            0x1190847f99952834,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
        Ok(_response.map(|x| x))
    }

    /// This request from sysmem to the securemem driver gets the properties of
    /// a protected/secure heap.
    ///
    /// This only handles heaps with a single contiguous physical extent.
    ///
    /// The heap's entire physical range is indicated in case this request needs
    /// some physical space to auto-detect how many ranges are REE-usable. Any
    /// temporary HW protection ranges will be deleted before this request
    /// completes.
    ///
    /// Errors:
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * other errors are allowed; any other errors should be treated the same
    ///   as UNSPECIFIED.
    pub fn r#get_physical_secure_heap_properties(
        &self,
        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemGetPhysicalSecureHeapPropertiesRequest,
            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
            SecureMemMarker,
        >(
            payload,
            0xc6f06889009c7bc,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
        Ok(_response.map(|x| x))
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to add, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as protected
    /// before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// created.
    ///
    /// Sysmem must only call this up to once if dynamic_protection_ranges
    /// false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this multiple
    /// times as long as the current number of ranges never exceeds
    /// max_protected_range_count.
    ///
    /// The caller must not attempt to add a range that matches an
    /// already-existing range. Added ranges can overlap each other as long as
    /// no two ranges match exactly.
    ///
    /// Errors:
    /// * PROTOCOL_DEVIATION - called more than once when
    ///   !dynamic_protection_ranges. Adding a heap that would cause overall
    ///   heap count to exceed max_protected_range_count. Unexpected heap, or
    ///   range that doesn't conform to protected_range_granularity. See log.
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * other errors are possible, such as from communication failures or
    ///   server propagation of failures.
    pub fn r#add_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemAddSecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
            SecureMemMarker,
        >(
            payload,
            0x35f695b9b6c7217a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
        Ok(_response.map(|x| x))
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to delete, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as not
    /// protected before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// deleted.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// If any portion of the range being deleted is not also covered by another
    /// protected range, then any ongoing DMA to any part of the entire range
    /// may be interrupted / may fail, potentially in a way that's disruptive to
    /// the entire system (bus lockup or similar, depending on device details).
    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
    /// any portion of the range being deleted, unless the caller has other
    /// active ranges covering every block of the range being deleted. Ongoing
    /// DMA to/from blocks outside the range being deleted is never impacted by
    /// the deletion.
    ///
    /// Errors:
    /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///   Unexpected heap, or range that doesn't conform to
    ///   protected_range_granularity.
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * NOT_FOUND - the specified range is not found.
    /// * other errors are possible, such as from communication failures or
    ///   server propagation of failures.
    pub fn r#delete_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemDeleteSecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
            SecureMemMarker,
        >(
            payload,
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
        Ok(_response.map(|x| x))
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to modify and its new base and length, for a heap whose physical
    /// range(s) are set up via sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure the range to cover only the new
    /// offsets before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the range was not changed.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false. Sysmem
    /// must not call this if !is_mod_protected_range_available.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// The range must only be modified at one end or the other, but not both.
    /// If the range is getting shorter, and the un-covered blocks are not
    /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's geting shorter may fail in a way that disrupts the entire system
    /// (bus lockup or similar), so the caller must ensure that no DMA is
    /// ongoing to any portion of a range that is getting shorter, unless the
    /// blocks being un-covered by the modification to this range are all
    /// covered by other active ranges, in which case no disruption to ongoing
    /// DMA will occur.
    ///
    /// If a range is modified to become <= zero length, the range is deleted.
    ///
    /// Errors:
    /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///   Unexpected heap, or old_range or new_range that doesn't conform to
    ///   protected_range_granularity, or old_range and new_range differ in
    ///   both begin and end (disallowed).
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * NOT_FOUND - the specified range is not found.
    /// * other errors are possible, such as from communication failures or
    ///   server propagation of failures.
    pub fn r#modify_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemModifySecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
            SecureMemMarker,
        >(
            payload,
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
        Ok(_response.map(|x| x))
    }

    /// Zero a sub-range of a currently-existing physical range added via
    /// AddSecureHeapPhysicalRange(). The sub-range must be fully covered by
    /// exactly one physical range, and must not overlap with any other
    /// physical range.
    ///
    /// is_covering_range_explicit - When true, the covering range must be one
    /// of the ranges explicitly created via AddSecureHeapPhysicalRange(),
    /// possibly modified since. When false, the covering range must not
    /// be one of the ranges explicitly created via
    /// AddSecureHeapPhysicalRange(), but the covering range must exist as
    /// a covering range not created via AddSecureHeapPhysicalRange(). The
    /// covering range is typically the entire physical range (or a range
    /// which covers even more) of a heap configured by the TEE and whose
    /// configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
    ///
    /// Ongoing DMA is not disrupted by this request.
    ///
    /// Errors:
    /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///   Unexpected heap.
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * other errors are possible, such as from communication failures or
    ///   server propagation of failures.
    pub fn r#zero_sub_range(
        &self,
        mut payload: &SecureMemZeroSubRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemZeroSubRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
            SecureMemMarker,
        >(
            payload,
            0x5b25b7901a385ce5,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("zero_sub_range")?;
        Ok(_response.map(|x| x))
    }
}
15229
#[cfg(target_os = "fuchsia")]
impl From<SecureMemSynchronousProxy> for zx::NullableHandle {
    /// Converts the proxy into a raw handle by way of its underlying channel.
    fn from(proxy: SecureMemSynchronousProxy) -> Self {
        let channel = proxy.into_channel();
        channel.into()
    }
}
15236
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for SecureMemSynchronousProxy {
    /// Wraps a raw channel in a synchronous `SecureMem` proxy.
    fn from(channel: fidl::Channel) -> Self {
        SecureMemSynchronousProxy::new(channel)
    }
}
15243
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for SecureMemSynchronousProxy {
    type Protocol = SecureMemMarker;

    /// Builds a synchronous proxy from a typed `SecureMem` client endpoint.
    fn from_client(client: fidl::endpoints::ClientEnd<SecureMemMarker>) -> Self {
        let channel = client.into_channel();
        Self::new(channel)
    }
}
15252
/// Asynchronous client for `fuchsia.sysmem2/SecureMem`; cloning shares the
/// same underlying channel client.
#[derive(Debug, Clone)]
pub struct SecureMemProxy {
    // Async FIDL client that owns the underlying channel.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
15257
impl fidl::endpoints::Proxy for SecureMemProxy {
    type Protocol = SecureMemMarker;

    /// Wraps an async channel in this proxy.
    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    /// Attempts to recover the underlying channel; if the inner client cannot
    /// give it up, the proxy is handed back unchanged in the `Err` variant.
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    /// Borrows the underlying async channel without consuming the proxy.
    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
15273
15274impl SecureMemProxy {
    /// Create a new Proxy for fuchsia.sysmem2/SecureMem.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        // DEBUG_NAME is passed to the client, presumably so errors can
        // identify the protocol — see `ProtocolMarker::DEBUG_NAME`.
        let protocol_name = <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }
15280
    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> SecureMemEventStream {
        // The underlying event receiver can only be taken once (see Panics).
        SecureMemEventStream { event_receiver: self.client.take_event_receiver() }
    }
15289
    /// Gets the physical address and length of any secure heap whose physical
    /// range is configured via the TEE.
    ///
    /// Presently, these will be fixed physical addresses and lengths, with the
    /// location plumbed via the TEE.
    ///
    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
    /// when there isn't any special heap-specific per-VMO setup or teardown
    /// required.
    ///
    /// The physical range must be secured/protected by the TEE before the
    /// securemem driver responds to this request with success.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    /// * PROTOCOL_DEVIATION - called more than once.
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * other errors are allowed; any other errors should be treated the same
    ///   as UNSPECIFIED.
    pub fn r#get_physical_secure_heaps(
        &self,
    ) -> fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegates to the `SecureMemProxyInterface` implementation on self.
        SecureMemProxyInterface::r#get_physical_secure_heaps(self)
    }
15320
    /// Gets information about any secure heaps whose physical pages are not
    /// configured by the TEE, but by sysmem.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    /// * PROTOCOL_DEVIATION - called more than once.
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * other errors are allowed; any other errors should be treated the same
    ///   as UNSPECIFIED.
    pub fn r#get_dynamic_secure_heaps(
        &self,
    ) -> fidl::client::QueryResponseFut<
        SecureMemGetDynamicSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegates to the `SecureMemProxyInterface` implementation on self.
        SecureMemProxyInterface::r#get_dynamic_secure_heaps(self)
    }
15341
    /// This request from sysmem to the securemem driver gets the properties of
    /// a protected/secure heap.
    ///
    /// This only handles heaps with a single contiguous physical extent.
    ///
    /// The heap's entire physical range is indicated in case this request needs
    /// some physical space to auto-detect how many ranges are REE-usable. Any
    /// temporary HW protection ranges will be deleted before this request
    /// completes.
    ///
    /// Errors:
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * other errors are allowed; any other errors should be treated the same
    ///   as UNSPECIFIED.
    pub fn r#get_physical_secure_heap_properties(
        &self,
        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapPropertiesResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegates to the `SecureMemProxyInterface` implementation on self.
        SecureMemProxyInterface::r#get_physical_secure_heap_properties(self, payload)
    }
15366
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to add, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as protected
    /// before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// created.
    ///
    /// Sysmem must only call this up to once if dynamic_protection_ranges
    /// false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this multiple
    /// times as long as the current number of ranges never exceeds
    /// max_protected_range_count.
    ///
    /// The caller must not attempt to add a range that matches an
    /// already-existing range. Added ranges can overlap each other as long as
    /// no two ranges match exactly.
    ///
    /// Errors:
    /// * PROTOCOL_DEVIATION - called more than once when
    ///   !dynamic_protection_ranges. Adding a heap that would cause overall
    ///   heap count to exceed max_protected_range_count. Unexpected heap, or
    ///   range that doesn't conform to protected_range_granularity. See log.
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * other errors are possible, such as from communication failures or
    ///   server propagation of failures.
    pub fn r#add_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemAddSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Delegates to the `SecureMemProxyInterface` implementation on self.
        SecureMemProxyInterface::r#add_secure_heap_physical_range(self, payload)
    }
15410
15411 /// This request from sysmem to the securemem driver conveys a physical
15412 /// range to delete, for a heap whose physical range(s) are set up via
15413 /// sysmem.
15414 ///
15415 /// Only sysmem can call this because only sysmem is handed the client end
15416 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
15417 /// securemem driver is the server end of this protocol.
15418 ///
15419 /// The securemem driver must configure all the covered offsets as not
15420 /// protected before responding to this message with success.
15421 ///
15422 /// On failure, the securemem driver must ensure the protected range was not
15423 /// deleted.
15424 ///
15425 /// Sysmem must not call this if dynamic_protection_ranges false.
15426 ///
15427 /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
15428 /// on various ranges that exist at the time of the call.
15429 ///
15430 /// If any portion of the range being deleted is not also covered by another
15431 /// protected range, then any ongoing DMA to any part of the entire range
15432 /// may be interrupted / may fail, potentially in a way that's disruptive to
15433 /// the entire system (bus lockup or similar, depending on device details).
15434 /// Therefore, the caller must ensure that no ongoing DMA is occurring to
15435 /// any portion of the range being deleted, unless the caller has other
15436 /// active ranges covering every block of the range being deleted. Ongoing
15437 /// DMA to/from blocks outside the range being deleted is never impacted by
15438 /// the deletion.
15439 ///
15440 /// Errors:
15441 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15442 /// Unexpected heap, or range that doesn't conform to
15443 /// protected_range_granularity.
15444 /// * UNSPECIFIED - generic internal error (such as in communication
15445 /// with TEE which doesn't generate zx_status_t errors).
15446 /// * NOT_FOUND - the specified range is not found.
15447 /// * other errors are possible, such as from communication failures or
15448 /// server propagation of failures.
15449 pub fn r#delete_secure_heap_physical_range(
15450 &self,
15451 mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
15452 ) -> fidl::client::QueryResponseFut<
15453 SecureMemDeleteSecureHeapPhysicalRangeResult,
15454 fidl::encoding::DefaultFuchsiaResourceDialect,
15455 > {
15456 SecureMemProxyInterface::r#delete_secure_heap_physical_range(self, payload)
15457 }
15458
15459 /// This request from sysmem to the securemem driver conveys a physical
15460 /// range to modify and its new base and length, for a heap whose physical
15461 /// range(s) are set up via sysmem.
15462 ///
15463 /// Only sysmem can call this because only sysmem is handed the client end
15464 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
15465 /// securemem driver is the server end of this protocol.
15466 ///
15467 /// The securemem driver must configure the range to cover only the new
15468 /// offsets before responding to this message with success.
15469 ///
15470 /// On failure, the securemem driver must ensure the range was not changed.
15471 ///
15472 /// Sysmem must not call this if dynamic_protection_ranges false. Sysmem
15473 /// must not call this if !is_mod_protected_range_available.
15474 ///
15475 /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
15476 /// on various ranges that exist at the time of the call.
15477 ///
15478 /// The range must only be modified at one end or the other, but not both.
15479 /// If the range is getting shorter, and the un-covered blocks are not
15480 /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's getting shorter may fail in a way that disrupts the entire system
15482 /// (bus lockup or similar), so the caller must ensure that no DMA is
15483 /// ongoing to any portion of a range that is getting shorter, unless the
15484 /// blocks being un-covered by the modification to this range are all
15485 /// covered by other active ranges, in which case no disruption to ongoing
15486 /// DMA will occur.
15487 ///
15488 /// If a range is modified to become <= zero length, the range is deleted.
15489 ///
15490 /// Errors:
15491 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15492 /// Unexpected heap, or old_range or new_range that doesn't conform to
15493 /// protected_range_granularity, or old_range and new_range differ in
15494 /// both begin and end (disallowed).
15495 /// * UNSPECIFIED - generic internal error (such as in communication
15496 /// with TEE which doesn't generate zx_status_t errors).
15497 /// * NOT_FOUND - the specified range is not found.
15498 /// * other errors are possible, such as from communication failures or
15499 /// server propagation of failures.
15500 pub fn r#modify_secure_heap_physical_range(
15501 &self,
15502 mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
15503 ) -> fidl::client::QueryResponseFut<
15504 SecureMemModifySecureHeapPhysicalRangeResult,
15505 fidl::encoding::DefaultFuchsiaResourceDialect,
15506 > {
15507 SecureMemProxyInterface::r#modify_secure_heap_physical_range(self, payload)
15508 }
15509
15510 /// Zero a sub-range of a currently-existing physical range added via
15511 /// AddSecureHeapPhysicalRange(). The sub-range must be fully covered by
15512 /// exactly one physical range, and must not overlap with any other
15513 /// physical range.
15514 ///
15515 /// is_covering_range_explicit - When true, the covering range must be one
15516 /// of the ranges explicitly created via AddSecureHeapPhysicalRange(),
15517 /// possibly modified since. When false, the covering range must not
15518 /// be one of the ranges explicitly created via
15519 /// AddSecureHeapPhysicalRange(), but the covering range must exist as
15520 /// a covering range not created via AddSecureHeapPhysicalRange(). The
15521 /// covering range is typically the entire physical range (or a range
15522 /// which covers even more) of a heap configured by the TEE and whose
15523 /// configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
15524 ///
15525 /// Ongoing DMA is not disrupted by this request.
15526 ///
15527 /// Errors:
15528 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15529 /// Unexpected heap.
15530 /// * UNSPECIFIED - generic internal error (such as in communication
15531 /// with TEE which doesn't generate zx_status_t errors).
15532 /// * other errors are possible, such as from communication failures or
15533 /// server propagation of failures.
15534 pub fn r#zero_sub_range(
15535 &self,
15536 mut payload: &SecureMemZeroSubRangeRequest,
15537 ) -> fidl::client::QueryResponseFut<
15538 SecureMemZeroSubRangeResult,
15539 fidl::encoding::DefaultFuchsiaResourceDialect,
15540 > {
15541 SecureMemProxyInterface::r#zero_sub_range(self, payload)
15542 }
15543}
15544
// Client-side transport implementation for the SecureMem protocol.
//
// Every two-way method follows the same generated pattern: a local `_decode`
// helper parses the response body for the method's fixed ordinal, and
// `send_query_and_decode` encodes the request, sends it with FLEXIBLE dynamic
// flags, and returns a future that resolves via `_decode`. The hex constants
// are the method ordinals from the FIDL wire format and must match the
// server-side dispatch exactly.
impl SecureMemProxyInterface for SecureMemProxy {
    type GetPhysicalSecureHeapsResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut {
        // Parses the flexible result envelope; unknown-interaction handling is
        // folded into `into_result`.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x38716300592073e3,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            SecureMemGetPhysicalSecureHeapsResult,
        >(
            (),
            0x38716300592073e3,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetDynamicSecureHeapsResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetDynamicSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x1190847f99952834,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            SecureMemGetDynamicSecureHeapsResult,
        >(
            (),
            0x1190847f99952834,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetPhysicalSecureHeapPropertiesResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapPropertiesResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_physical_secure_heap_properties(
        &self,
        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<
                    SecureMemGetPhysicalSecureHeapPropertiesResponse,
                    Error,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0xc6f06889009c7bc,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemGetPhysicalSecureHeapPropertiesRequest,
            SecureMemGetPhysicalSecureHeapPropertiesResult,
        >(
            payload,
            0xc6f06889009c7bc,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type AddSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemAddSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#add_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> Self::AddSecureHeapPhysicalRangeResponseFut {
        // Success carries no payload, hence the `EmptyStruct` result type.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x35f695b9b6c7217a,
            >(_buf?)?
            .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemAddSecureHeapPhysicalRangeRequest,
            SecureMemAddSecureHeapPhysicalRangeResult,
        >(
            payload,
            0x35f695b9b6c7217a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type DeleteSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemDeleteSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#delete_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0xeaa58c650264c9e,
            >(_buf?)?
            .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemDeleteSecureHeapPhysicalRangeRequest,
            SecureMemDeleteSecureHeapPhysicalRangeResult,
        >(
            payload,
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type ModifySecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemModifySecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#modify_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x60b7448aa1187734,
            >(_buf?)?
            .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemModifySecureHeapPhysicalRangeRequest,
            SecureMemModifySecureHeapPhysicalRangeResult,
        >(
            payload,
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type ZeroSubRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemZeroSubRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#zero_sub_range(
        &self,
        mut payload: &SecureMemZeroSubRangeRequest,
    ) -> Self::ZeroSubRangeResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b25b7901a385ce5,
            >(_buf?)?
            .into_result::<SecureMemMarker>("zero_sub_range")?;
            Ok(_response.map(|x| x))
        }
        self.client
            .send_query_and_decode::<SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResult>(
                payload,
                0x5b25b7901a385ce5,
                fidl::encoding::DynamicFlags::FLEXIBLE,
                _decode,
            )
    }
}
15751
/// Stream of events arriving on a `SecureMem` client endpoint.
///
/// Since [`SecureMemEvent`] declares only an `_UnknownEvent` variant, items
/// yielded here are flexible events not known to these bindings.
pub struct SecureMemEventStream {
    // Receives raw event message buffers from the underlying channel.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
15755
15756impl std::marker::Unpin for SecureMemEventStream {}
15757
impl futures::stream::FusedStream for SecureMemEventStream {
    // Termination is tracked by the underlying event receiver; delegate.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
15763
15764impl futures::Stream for SecureMemEventStream {
15765 type Item = Result<SecureMemEvent, fidl::Error>;
15766
15767 fn poll_next(
15768 mut self: std::pin::Pin<&mut Self>,
15769 cx: &mut std::task::Context<'_>,
15770 ) -> std::task::Poll<Option<Self::Item>> {
15771 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
15772 &mut self.event_receiver,
15773 cx
15774 )?) {
15775 Some(buf) => std::task::Poll::Ready(Some(SecureMemEvent::decode(buf))),
15776 None => std::task::Poll::Ready(None),
15777 }
15778 }
15779}
15780
/// Events that can be received from a `SecureMem` server endpoint.
///
/// The protocol declares no strict events; only unknown flexible events are
/// represented.
#[derive(Debug)]
pub enum SecureMemEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
15789
15790impl SecureMemEvent {
15791 /// Decodes a message buffer as a [`SecureMemEvent`].
15792 fn decode(
15793 mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
15794 ) -> Result<SecureMemEvent, fidl::Error> {
15795 let (bytes, _handles) = buf.split_mut();
15796 let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
15797 debug_assert_eq!(tx_header.tx_id, 0);
15798 match tx_header.ordinal {
15799 _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
15800 Ok(SecureMemEvent::_UnknownEvent { ordinal: tx_header.ordinal })
15801 }
15802 _ => Err(fidl::Error::UnknownOrdinal {
15803 ordinal: tx_header.ordinal,
15804 protocol_name: <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
15805 }),
15806 }
15807 }
15808}
15809
/// A Stream of incoming requests for fuchsia.sysmem2/SecureMem.
pub struct SecureMemRequestStream {
    // Shared server state (channel plus shutdown bookkeeping), also cloned
    // into every control handle handed out for this stream.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has yielded `None`; polling after that panics.
    is_terminated: bool,
}
15815
15816impl std::marker::Unpin for SecureMemRequestStream {}
15817
impl futures::stream::FusedStream for SecureMemRequestStream {
    // Reports the flag set by `poll_next` when the channel shuts down or the
    // peer closes.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
15823
15824impl fidl::endpoints::RequestStream for SecureMemRequestStream {
15825 type Protocol = SecureMemMarker;
15826 type ControlHandle = SecureMemControlHandle;
15827
15828 fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
15829 Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
15830 }
15831
15832 fn control_handle(&self) -> Self::ControlHandle {
15833 SecureMemControlHandle { inner: self.inner.clone() }
15834 }
15835
15836 fn into_inner(
15837 self,
15838 ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
15839 {
15840 (self.inner, self.is_terminated)
15841 }
15842
15843 fn from_inner(
15844 inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
15845 is_terminated: bool,
15846 ) -> Self {
15847 Self { inner, is_terminated }
15848 }
15849}
15850
// Server-side request dispatch: reads messages off the channel, matches on
// the method ordinal, decodes the request payload, and yields a
// `SecureMemRequest` carrying a responder for two-way methods. The hex
// constants are the wire-format ordinals and must match the client side.
impl futures::Stream for SecureMemRequestStream {
    type Item = Result<SecureMemRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Honor a requested shutdown before attempting another read.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        // Polling a finished stream is a caller bug.
        if this.is_terminated {
            panic!("polled SecureMemRequestStream after completion");
        }
        // Decode into thread-local scratch buffers to avoid per-message
        // allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure ends the stream cleanly rather than erroring.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // GetPhysicalSecureHeaps (empty request payload)
                    0x38716300592073e3 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetPhysicalSecureHeaps {
                            responder: SecureMemGetPhysicalSecureHeapsResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetDynamicSecureHeaps (empty request payload)
                    0x1190847f99952834 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetDynamicSecureHeaps {
                            responder: SecureMemGetDynamicSecureHeapsResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetPhysicalSecureHeapProperties
                    0xc6f06889009c7bc => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemGetPhysicalSecureHeapPropertiesRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemGetPhysicalSecureHeapPropertiesRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetPhysicalSecureHeapProperties {
                            payload: req,
                            responder: SecureMemGetPhysicalSecureHeapPropertiesResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // AddSecureHeapPhysicalRange
                    0x35f695b9b6c7217a => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemAddSecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemAddSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::AddSecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemAddSecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // DeleteSecureHeapPhysicalRange
                    0xeaa58c650264c9e => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemDeleteSecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemDeleteSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::DeleteSecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemDeleteSecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // ModifySecureHeapPhysicalRange
                    0x60b7448aa1187734 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemModifySecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemModifySecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::ModifySecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemModifySecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // ZeroSubRange
                    0x5b25b7901a385ce5 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemZeroSubRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemZeroSubRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::ZeroSubRange {
                            payload: req,
                            responder: SecureMemZeroSubRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Unknown flexible one-way method: surface to the server
                    // without replying (tx_id == 0 means no reply expected).
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(SecureMemRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with the
                    // framework's UnknownMethod error, then surface it.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(SecureMemRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
16034
16035/// SecureMem
16036///
16037/// The client is sysmem. The server is securemem driver.
16038///
16039/// TEE - Trusted Execution Environment.
16040///
16041/// REE - Rich Execution Environment.
16042///
16043/// Enables sysmem to call the securemem driver to get any secure heaps
16044/// configured via the TEE (or via the securemem driver), and set any physical
16045/// secure heaps configured via sysmem.
16046///
16047/// Presently, dynamically-allocated secure heaps are configured via sysmem, as
16048/// it starts quite early during boot and can successfully reserve contiguous
16049/// physical memory. Presently, fixed-location secure heaps are configured via
16050/// TEE, as the plumbing goes from the bootloader to the TEE. However, this
16051/// protocol intentionally doesn't care which heaps are dynamically-allocated
16052/// and which are fixed-location.
16053#[derive(Debug)]
16054pub enum SecureMemRequest {
16055 /// Gets the physical address and length of any secure heap whose physical
16056 /// range is configured via the TEE.
16057 ///
16058 /// Presently, these will be fixed physical addresses and lengths, with the
16059 /// location plumbed via the TEE.
16060 ///
16061 /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
16062 /// when there isn't any special heap-specific per-VMO setup or teardown
16063 /// required.
16064 ///
16065 /// The physical range must be secured/protected by the TEE before the
16066 /// securemem driver responds to this request with success.
16067 ///
16068 /// Sysmem should only call this once. Returning zero heaps is not a
16069 /// failure.
16070 ///
16071 /// Errors:
16072 /// * PROTOCOL_DEVIATION - called more than once.
16073 /// * UNSPECIFIED - generic internal error (such as in communication
16074 /// with TEE which doesn't generate zx_status_t errors).
16075 /// * other errors are allowed; any other errors should be treated the same
16076 /// as UNSPECIFIED.
16077 GetPhysicalSecureHeaps { responder: SecureMemGetPhysicalSecureHeapsResponder },
16078 /// Gets information about any secure heaps whose physical pages are not
16079 /// configured by the TEE, but by sysmem.
16080 ///
16081 /// Sysmem should only call this once. Returning zero heaps is not a
16082 /// failure.
16083 ///
16084 /// Errors:
16085 /// * PROTOCOL_DEVIATION - called more than once.
16086 /// * UNSPECIFIED - generic internal error (such as in communication
16087 /// with TEE which doesn't generate zx_status_t errors).
16088 /// * other errors are allowed; any other errors should be treated the same
16089 /// as UNSPECIFIED.
16090 GetDynamicSecureHeaps { responder: SecureMemGetDynamicSecureHeapsResponder },
16091 /// This request from sysmem to the securemem driver gets the properties of
16092 /// a protected/secure heap.
16093 ///
16094 /// This only handles heaps with a single contiguous physical extent.
16095 ///
16096 /// The heap's entire physical range is indicated in case this request needs
16097 /// some physical space to auto-detect how many ranges are REE-usable. Any
16098 /// temporary HW protection ranges will be deleted before this request
16099 /// completes.
16100 ///
16101 /// Errors:
16102 /// * UNSPECIFIED - generic internal error (such as in communication
16103 /// with TEE which doesn't generate zx_status_t errors).
16104 /// * other errors are allowed; any other errors should be treated the same
16105 /// as UNSPECIFIED.
16106 GetPhysicalSecureHeapProperties {
16107 payload: SecureMemGetPhysicalSecureHeapPropertiesRequest,
16108 responder: SecureMemGetPhysicalSecureHeapPropertiesResponder,
16109 },
16110 /// This request from sysmem to the securemem driver conveys a physical
16111 /// range to add, for a heap whose physical range(s) are set up via
16112 /// sysmem.
16113 ///
16114 /// Only sysmem can call this because only sysmem is handed the client end
16115 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
16116 /// securemem driver is the server end of this protocol.
16117 ///
16118 /// The securemem driver must configure all the covered offsets as protected
16119 /// before responding to this message with success.
16120 ///
16121 /// On failure, the securemem driver must ensure the protected range was not
16122 /// created.
16123 ///
16124 /// Sysmem must only call this up to once if dynamic_protection_ranges
16125 /// false.
16126 ///
16127 /// If dynamic_protection_ranges is true, sysmem can call this multiple
16128 /// times as long as the current number of ranges never exceeds
16129 /// max_protected_range_count.
16130 ///
16131 /// The caller must not attempt to add a range that matches an
16132 /// already-existing range. Added ranges can overlap each other as long as
16133 /// no two ranges match exactly.
16134 ///
16135 /// Errors:
16136 /// * PROTOCOL_DEVIATION - called more than once when
16137 /// !dynamic_protection_ranges. Adding a heap that would cause overall
16138 /// heap count to exceed max_protected_range_count. Unexpected heap, or
16139 /// range that doesn't conform to protected_range_granularity. See log.
16140 /// * UNSPECIFIED - generic internal error (such as in communication
16141 /// with TEE which doesn't generate zx_status_t errors).
16142 /// * other errors are possible, such as from communication failures or
16143 /// server propagation of failures.
16144 AddSecureHeapPhysicalRange {
16145 payload: SecureMemAddSecureHeapPhysicalRangeRequest,
16146 responder: SecureMemAddSecureHeapPhysicalRangeResponder,
16147 },
16148 /// This request from sysmem to the securemem driver conveys a physical
16149 /// range to delete, for a heap whose physical range(s) are set up via
16150 /// sysmem.
16151 ///
16152 /// Only sysmem can call this because only sysmem is handed the client end
16153 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
16154 /// securemem driver is the server end of this protocol.
16155 ///
16156 /// The securemem driver must configure all the covered offsets as not
16157 /// protected before responding to this message with success.
16158 ///
16159 /// On failure, the securemem driver must ensure the protected range was not
16160 /// deleted.
16161 ///
16162 /// Sysmem must not call this if dynamic_protection_ranges false.
16163 ///
16164 /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
16165 /// on various ranges that exist at the time of the call.
16166 ///
16167 /// If any portion of the range being deleted is not also covered by another
16168 /// protected range, then any ongoing DMA to any part of the entire range
16169 /// may be interrupted / may fail, potentially in a way that's disruptive to
16170 /// the entire system (bus lockup or similar, depending on device details).
16171 /// Therefore, the caller must ensure that no ongoing DMA is occurring to
16172 /// any portion of the range being deleted, unless the caller has other
16173 /// active ranges covering every block of the range being deleted. Ongoing
16174 /// DMA to/from blocks outside the range being deleted is never impacted by
16175 /// the deletion.
16176 ///
16177 /// Errors:
16178 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
16179 /// Unexpected heap, or range that doesn't conform to
16180 /// protected_range_granularity.
16181 /// * UNSPECIFIED - generic internal error (such as in communication
16182 /// with TEE which doesn't generate zx_status_t errors).
16183 /// * NOT_FOUND - the specified range is not found.
16184 /// * other errors are possible, such as from communication failures or
16185 /// server propagation of failures.
16186 DeleteSecureHeapPhysicalRange {
16187 payload: SecureMemDeleteSecureHeapPhysicalRangeRequest,
16188 responder: SecureMemDeleteSecureHeapPhysicalRangeResponder,
16189 },
16190 /// This request from sysmem to the securemem driver conveys a physical
16191 /// range to modify and its new base and length, for a heap whose physical
16192 /// range(s) are set up via sysmem.
16193 ///
16194 /// Only sysmem can call this because only sysmem is handed the client end
16195 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
16196 /// securemem driver is the server end of this protocol.
16197 ///
16198 /// The securemem driver must configure the range to cover only the new
16199 /// offsets before responding to this message with success.
16200 ///
16201 /// On failure, the securemem driver must ensure the range was not changed.
16202 ///
16203 /// Sysmem must not call this if dynamic_protection_ranges false. Sysmem
16204 /// must not call this if !is_mod_protected_range_available.
16205 ///
16206 /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
16207 /// on various ranges that exist at the time of the call.
16208 ///
16209 /// The range must only be modified at one end or the other, but not both.
16210 /// If the range is getting shorter, and the un-covered blocks are not
16211 /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's getting shorter may fail in a way that disrupts the entire system
16213 /// (bus lockup or similar), so the caller must ensure that no DMA is
16214 /// ongoing to any portion of a range that is getting shorter, unless the
16215 /// blocks being un-covered by the modification to this range are all
16216 /// covered by other active ranges, in which case no disruption to ongoing
16217 /// DMA will occur.
16218 ///
16219 /// If a range is modified to become <= zero length, the range is deleted.
16220 ///
16221 /// Errors:
16222 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
16223 /// Unexpected heap, or old_range or new_range that doesn't conform to
16224 /// protected_range_granularity, or old_range and new_range differ in
16225 /// both begin and end (disallowed).
16226 /// * UNSPECIFIED - generic internal error (such as in communication
16227 /// with TEE which doesn't generate zx_status_t errors).
16228 /// * NOT_FOUND - the specified range is not found.
16229 /// * other errors are possible, such as from communication failures or
16230 /// server propagation of failures.
16231 ModifySecureHeapPhysicalRange {
16232 payload: SecureMemModifySecureHeapPhysicalRangeRequest,
16233 responder: SecureMemModifySecureHeapPhysicalRangeResponder,
16234 },
16235 /// Zero a sub-range of a currently-existing physical range added via
16236 /// AddSecureHeapPhysicalRange(). The sub-range must be fully covered by
16237 /// exactly one physical range, and must not overlap with any other
16238 /// physical range.
16239 ///
16240 /// is_covering_range_explicit - When true, the covering range must be one
16241 /// of the ranges explicitly created via AddSecureHeapPhysicalRange(),
16242 /// possibly modified since. When false, the covering range must not
16243 /// be one of the ranges explicitly created via
16244 /// AddSecureHeapPhysicalRange(), but the covering range must exist as
16245 /// a covering range not created via AddSecureHeapPhysicalRange(). The
16246 /// covering range is typically the entire physical range (or a range
16247 /// which covers even more) of a heap configured by the TEE and whose
16248 /// configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
16249 ///
16250 /// Ongoing DMA is not disrupted by this request.
16251 ///
16252 /// Errors:
16253 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
16254 /// Unexpected heap.
16255 /// * UNSPECIFIED - generic internal error (such as in communication
16256 /// with TEE which doesn't generate zx_status_t errors).
16257 /// * other errors are possible, such as from communication failures or
16258 /// server propagation of failures.
16259 ZeroSubRange {
16260 payload: SecureMemZeroSubRangeRequest,
16261 responder: SecureMemZeroSubRangeResponder,
16262 },
16263 /// An interaction was received which does not match any known method.
16264 #[non_exhaustive]
16265 _UnknownMethod {
16266 /// Ordinal of the method that was called.
16267 ordinal: u64,
16268 control_handle: SecureMemControlHandle,
16269 method_type: fidl::MethodType,
16270 },
16271}
16272
16273impl SecureMemRequest {
16274 #[allow(irrefutable_let_patterns)]
16275 pub fn into_get_physical_secure_heaps(
16276 self,
16277 ) -> Option<(SecureMemGetPhysicalSecureHeapsResponder)> {
16278 if let SecureMemRequest::GetPhysicalSecureHeaps { responder } = self {
16279 Some((responder))
16280 } else {
16281 None
16282 }
16283 }
16284
16285 #[allow(irrefutable_let_patterns)]
16286 pub fn into_get_dynamic_secure_heaps(
16287 self,
16288 ) -> Option<(SecureMemGetDynamicSecureHeapsResponder)> {
16289 if let SecureMemRequest::GetDynamicSecureHeaps { responder } = self {
16290 Some((responder))
16291 } else {
16292 None
16293 }
16294 }
16295
16296 #[allow(irrefutable_let_patterns)]
16297 pub fn into_get_physical_secure_heap_properties(
16298 self,
16299 ) -> Option<(
16300 SecureMemGetPhysicalSecureHeapPropertiesRequest,
16301 SecureMemGetPhysicalSecureHeapPropertiesResponder,
16302 )> {
16303 if let SecureMemRequest::GetPhysicalSecureHeapProperties { payload, responder } = self {
16304 Some((payload, responder))
16305 } else {
16306 None
16307 }
16308 }
16309
16310 #[allow(irrefutable_let_patterns)]
16311 pub fn into_add_secure_heap_physical_range(
16312 self,
16313 ) -> Option<(
16314 SecureMemAddSecureHeapPhysicalRangeRequest,
16315 SecureMemAddSecureHeapPhysicalRangeResponder,
16316 )> {
16317 if let SecureMemRequest::AddSecureHeapPhysicalRange { payload, responder } = self {
16318 Some((payload, responder))
16319 } else {
16320 None
16321 }
16322 }
16323
16324 #[allow(irrefutable_let_patterns)]
16325 pub fn into_delete_secure_heap_physical_range(
16326 self,
16327 ) -> Option<(
16328 SecureMemDeleteSecureHeapPhysicalRangeRequest,
16329 SecureMemDeleteSecureHeapPhysicalRangeResponder,
16330 )> {
16331 if let SecureMemRequest::DeleteSecureHeapPhysicalRange { payload, responder } = self {
16332 Some((payload, responder))
16333 } else {
16334 None
16335 }
16336 }
16337
16338 #[allow(irrefutable_let_patterns)]
16339 pub fn into_modify_secure_heap_physical_range(
16340 self,
16341 ) -> Option<(
16342 SecureMemModifySecureHeapPhysicalRangeRequest,
16343 SecureMemModifySecureHeapPhysicalRangeResponder,
16344 )> {
16345 if let SecureMemRequest::ModifySecureHeapPhysicalRange { payload, responder } = self {
16346 Some((payload, responder))
16347 } else {
16348 None
16349 }
16350 }
16351
16352 #[allow(irrefutable_let_patterns)]
16353 pub fn into_zero_sub_range(
16354 self,
16355 ) -> Option<(SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResponder)> {
16356 if let SecureMemRequest::ZeroSubRange { payload, responder } = self {
16357 Some((payload, responder))
16358 } else {
16359 None
16360 }
16361 }
16362
16363 /// Name of the method defined in FIDL
16364 pub fn method_name(&self) -> &'static str {
16365 match *self {
16366 SecureMemRequest::GetPhysicalSecureHeaps { .. } => "get_physical_secure_heaps",
16367 SecureMemRequest::GetDynamicSecureHeaps { .. } => "get_dynamic_secure_heaps",
16368 SecureMemRequest::GetPhysicalSecureHeapProperties { .. } => {
16369 "get_physical_secure_heap_properties"
16370 }
16371 SecureMemRequest::AddSecureHeapPhysicalRange { .. } => "add_secure_heap_physical_range",
16372 SecureMemRequest::DeleteSecureHeapPhysicalRange { .. } => {
16373 "delete_secure_heap_physical_range"
16374 }
16375 SecureMemRequest::ModifySecureHeapPhysicalRange { .. } => {
16376 "modify_secure_heap_physical_range"
16377 }
16378 SecureMemRequest::ZeroSubRange { .. } => "zero_sub_range",
16379 SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
16380 "unknown one-way method"
16381 }
16382 SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
16383 "unknown two-way method"
16384 }
16385 }
16386 }
16387}
16388
/// Handle used by `SecureMem` server code to shut down or signal the
/// underlying channel; cloneable because all state lives in the shared
/// `ServeInner`.
#[derive(Debug, Clone)]
pub struct SecureMemControlHandle {
    // Shared serving state; every handle operation delegates to this.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
16393
// Every operation delegates to the shared `ServeInner` (or its channel).
impl fidl::endpoints::ControlHandle for SecureMemControlHandle {
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Only available on Fuchsia targets, where zircon channel peer signaling exists.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}

// Intentionally empty: no protocol-specific methods are generated on the handle itself.
impl SecureMemControlHandle {}
16422
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetPhysicalSecureHeapsResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder replies to.
    tx_id: u32,
}

/// Sets the channel to be shut down (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetPhysicalSecureHeapsResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapsResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemGetPhysicalSecureHeapsResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the response message; shared by both `send` variants.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetPhysicalSecureHeapsResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal for GetPhysicalSecureHeaps.
            0x38716300592073e3,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16497
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetDynamicSecureHeapsResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder replies to.
    tx_id: u32,
}

/// Sets the channel to be shut down (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetDynamicSecureHeapsResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemGetDynamicSecureHeapsResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemGetDynamicSecureHeapsResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the response message; shared by both `send` variants.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetDynamicSecureHeapsResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal for GetDynamicSecureHeaps.
            0x1190847f99952834,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16572
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetPhysicalSecureHeapPropertiesResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder replies to.
    tx_id: u32,
}

/// Sets the channel to be shut down (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetPhysicalSecureHeapPropertiesResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapPropertiesResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemGetPhysicalSecureHeapPropertiesResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the response message; shared by both `send` variants.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetPhysicalSecureHeapPropertiesResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal for GetPhysicalSecureHeapProperties.
            0xc6f06889009c7bc,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16647
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemAddSecureHeapPhysicalRangeResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder replies to.
    tx_id: u32,
}

/// Sets the channel to be shut down (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemAddSecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemAddSecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemAddSecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the (empty-payload) response message; shared by both
    /// `send` variants.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal for AddSecureHeapPhysicalRange.
            0x35f695b9b6c7217a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16713
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemDeleteSecureHeapPhysicalRangeResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder replies to.
    tx_id: u32,
}

/// Sets the channel to be shut down (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemDeleteSecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemDeleteSecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemDeleteSecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the (empty-payload) response message; shared by both
    /// `send` variants.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal for DeleteSecureHeapPhysicalRange.
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16779
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemModifySecureHeapPhysicalRangeResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder replies to.
    tx_id: u32,
}

/// Sets the channel to be shut down (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemModifySecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemModifySecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemModifySecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the (empty-payload) response message; shared by both
    /// `send` variants.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal for ModifySecureHeapPhysicalRange.
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16845
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemZeroSubRangeResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's Drop impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request this responder replies to.
    tx_id: u32,
}

/// Sets the channel to be shut down (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemZeroSubRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemZeroSubRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemZeroSubRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the (empty-payload) response message; shared by both
    /// `send` variants.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal for ZeroSubRange.
            0x5b25b7901a385ce5,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16911
16912mod internal {
16913 use super::*;
16914
16915 impl AllocatorAllocateNonSharedCollectionRequest {
16916 #[inline(always)]
16917 fn max_ordinal_present(&self) -> u64 {
16918 if let Some(_) = self.collection_request {
16919 return 1;
16920 }
16921 0
16922 }
16923 }
16924
16925 impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateNonSharedCollectionRequest {
16926 type Borrowed<'a> = &'a mut Self;
16927 fn take_or_borrow<'a>(
16928 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
16929 ) -> Self::Borrowed<'a> {
16930 value
16931 }
16932 }
16933
16934 unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateNonSharedCollectionRequest {
16935 type Owned = Self;
16936
16937 #[inline(always)]
16938 fn inline_align(_context: fidl::encoding::Context) -> usize {
16939 8
16940 }
16941
16942 #[inline(always)]
16943 fn inline_size(_context: fidl::encoding::Context) -> usize {
16944 16
16945 }
16946 }
16947
// Encodes the table as a FIDL envelope vector: a 16-byte inline header
// (max-ordinal count + presence marker) followed by one 8-byte envelope per
// ordinal up to the highest present field. Encoding via `&mut` lets handle
// fields be moved out of `self`.
unsafe impl
    fidl::encoding::Encode<
        AllocatorAllocateNonSharedCollectionRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut AllocatorAllocateNonSharedCollectionRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<AllocatorAllocateNonSharedCollectionRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `collection_request`; `take_or_borrow` moves the server-end
        // handle out of `self` so the encoder takes ownership of it.
        fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
17003
// Decodes the table from its envelope-vector representation, tolerating
// unknown ordinals (skipped via `decode_unknown_envelope`) so newer peers
// remain compatible. Envelope byte/handle counts are validated against the
// decoder's actual consumption.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateNonSharedCollectionRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A null table body is invalid; `len` is the number of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `collection_request`.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of <= 4 bytes must be stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.collection_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope header's byte/handle counts matched what the
            // member decoder actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
17109
17110 impl AllocatorAllocateSharedCollectionRequest {
17111 #[inline(always)]
17112 fn max_ordinal_present(&self) -> u64 {
17113 if let Some(_) = self.token_request {
17114 return 1;
17115 }
17116 0
17117 }
17118 }
17119
17120 impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateSharedCollectionRequest {
17121 type Borrowed<'a> = &'a mut Self;
17122 fn take_or_borrow<'a>(
17123 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17124 ) -> Self::Borrowed<'a> {
17125 value
17126 }
17127 }
17128
17129 unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateSharedCollectionRequest {
17130 type Owned = Self;
17131
17132 #[inline(always)]
17133 fn inline_align(_context: fidl::encoding::Context) -> usize {
17134 8
17135 }
17136
17137 #[inline(always)]
17138 fn inline_size(_context: fidl::encoding::Context) -> usize {
17139 16
17140 }
17141 }
17142
// Encodes the table as a FIDL envelope vector: 16-byte inline header plus one
// 8-byte envelope per ordinal up to the highest present field. Encoding via
// `&mut` lets the handle field be moved out of `self`.
unsafe impl
    fidl::encoding::Encode<
        AllocatorAllocateSharedCollectionRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut AllocatorAllocateSharedCollectionRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<AllocatorAllocateSharedCollectionRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `token_request`; `take_or_borrow` moves the server-end
        // handle out of `self` so the encoder takes ownership of it.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
17207
// Decodes the table from its envelope-vector representation, skipping unknown
// ordinals for forward compatibility and validating envelope byte/handle
// counts against actual consumption.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateSharedCollectionRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A null table body is invalid; `len` is the number of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `token_request`.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of <= 4 bytes must be stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope header's byte/handle counts matched what the
            // member decoder actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
17315
17316 impl AllocatorBindSharedCollectionRequest {
17317 #[inline(always)]
17318 fn max_ordinal_present(&self) -> u64 {
17319 if let Some(_) = self.buffer_collection_request {
17320 return 2;
17321 }
17322 if let Some(_) = self.token {
17323 return 1;
17324 }
17325 0
17326 }
17327 }
17328
17329 impl fidl::encoding::ResourceTypeMarker for AllocatorBindSharedCollectionRequest {
17330 type Borrowed<'a> = &'a mut Self;
17331 fn take_or_borrow<'a>(
17332 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17333 ) -> Self::Borrowed<'a> {
17334 value
17335 }
17336 }
17337
17338 unsafe impl fidl::encoding::TypeMarker for AllocatorBindSharedCollectionRequest {
17339 type Owned = Self;
17340
17341 #[inline(always)]
17342 fn inline_align(_context: fidl::encoding::Context) -> usize {
17343 8
17344 }
17345
17346 #[inline(always)]
17347 fn inline_size(_context: fidl::encoding::Context) -> usize {
17348 16
17349 }
17350 }
17351
// Encodes the table as a FIDL envelope vector: 16-byte inline header plus one
// 8-byte envelope per ordinal up to the highest present field (ordinal 1:
// `token`, ordinal 2: `buffer_collection_request`). Encoding via `&mut` lets
// the handle fields be moved out of `self`.
unsafe impl
    fidl::encoding::Encode<
        AllocatorBindSharedCollectionRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut AllocatorBindSharedCollectionRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<AllocatorBindSharedCollectionRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `token` — the client end being bound.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 2: `buffer_collection_request` — the server end to bind to.
        fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.buffer_collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
17437
// Decodes the table from its envelope-vector representation (ordinal 1:
// `token`, ordinal 2: `buffer_collection_request`), skipping unknown ordinals
// for forward compatibility and validating envelope byte/handle counts
// against actual consumption.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorBindSharedCollectionRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A null table body is invalid; `len` is the number of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `token`.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of <= 4 bytes must be stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope header's byte/handle counts matched what the
            // member decoder actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 2: `buffer_collection_request`.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_collection_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
17605
17606 impl AllocatorGetVmoInfoRequest {
17607 #[inline(always)]
17608 fn max_ordinal_present(&self) -> u64 {
17609 if let Some(_) = self.vmo {
17610 return 1;
17611 }
17612 0
17613 }
17614 }
17615
17616 impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoRequest {
17617 type Borrowed<'a> = &'a mut Self;
17618 fn take_or_borrow<'a>(
17619 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17620 ) -> Self::Borrowed<'a> {
17621 value
17622 }
17623 }
17624
17625 unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoRequest {
17626 type Owned = Self;
17627
17628 #[inline(always)]
17629 fn inline_align(_context: fidl::encoding::Context) -> usize {
17630 8
17631 }
17632
17633 #[inline(always)]
17634 fn inline_size(_context: fidl::encoding::Context) -> usize {
17635 16
17636 }
17637 }
17638
// Encodes the table as a FIDL envelope vector: 16-byte inline header plus one
// 8-byte envelope per ordinal up to the highest present field. Encoding via
// `&mut` lets the VMO handle be moved out of `self`.
unsafe impl
    fidl::encoding::Encode<
        AllocatorGetVmoInfoRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut AllocatorGetVmoInfoRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<AllocatorGetVmoInfoRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `vmo` — a VMO handle; 2147483648 (1 << 31) is the rights
        // mask used by the bindings for handle dispositions here.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::Vmo,
                { fidl::ObjectType::VMO.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.vmo.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
17709
// Decodes the table from its envelope-vector representation (ordinal 1: the
// `vmo` handle), skipping unknown ordinals for forward compatibility and
// validating envelope byte/handle counts against actual consumption.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // A null table body is invalid; `len` is the number of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `vmo`.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::Vmo,
                { fidl::ObjectType::VMO.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of <= 4 bytes must be stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            // Verify the envelope header's byte/handle counts matched what the
            // member decoder actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
17804
17805 impl AllocatorGetVmoInfoResponse {
17806 #[inline(always)]
17807 fn max_ordinal_present(&self) -> u64 {
17808 if let Some(_) = self.close_weak_asap {
17809 return 3;
17810 }
17811 if let Some(_) = self.buffer_index {
17812 return 2;
17813 }
17814 if let Some(_) = self.buffer_collection_id {
17815 return 1;
17816 }
17817 0
17818 }
17819 }
17820
17821 impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoResponse {
17822 type Borrowed<'a> = &'a mut Self;
17823 fn take_or_borrow<'a>(
17824 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17825 ) -> Self::Borrowed<'a> {
17826 value
17827 }
17828 }
17829
17830 unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoResponse {
17831 type Owned = Self;
17832
17833 #[inline(always)]
17834 fn inline_align(_context: fidl::encoding::Context) -> usize {
17835 8
17836 }
17837
17838 #[inline(always)]
17839 fn inline_size(_context: fidl::encoding::Context) -> usize {
17840 16
17841 }
17842 }
17843
// Encodes the table as a FIDL envelope vector: 16-byte inline header plus one
// 8-byte envelope per ordinal up to the highest present field (ordinal 1:
// `buffer_collection_id`, ordinal 2: `buffer_index`, ordinal 3:
// `close_weak_asap`). Plain-value fields are borrowed; the event-pair handle
// is moved out of `self`.
unsafe impl
    fidl::encoding::Encode<
        AllocatorGetVmoInfoResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut AllocatorGetVmoInfoResponse
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<AllocatorGetVmoInfoResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `buffer_collection_id` (u64 value; borrowed, not moved).
        fidl::encoding::encode_in_envelope_optional::<
            u64,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffer_collection_id
                .as_ref()
                .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 2: `buffer_index` (u64 value; borrowed, not moved).
        fidl::encoding::encode_in_envelope_optional::<
            u64,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffer_index.as_ref().map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 3 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (3 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 3: `close_weak_asap` — an event-pair handle moved out of
        // `self` via `take_or_borrow`.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.close_weak_asap.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
17968
// Wire-format decoding for the `AllocatorGetVmoInfoResponse` FIDL table.
// A table is a vector header (member count + presence marker) followed by
// one 8-byte envelope per ordinal; absent trailing members simply shorten
// the vector, and unknown ordinals are skipped for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Decodes in ordinal order: 1 = buffer_collection_id (u64),
    // 2 = buffer_index (u64), 3 = close_weak_asap (zx eventpair handle).
    // Each member's envelope is validated for the inline bit, byte count,
    // and handle count before moving to the next ordinal.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: buffer_collection_id (u64).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
                fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        // Ordinal 2: buffer_index (u64).
        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_index.get_or_insert_with(|| {
                fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        // Ordinal 3: close_weak_asap (EVENTPAIR handle; 2147483648 is the
        // rights word used by the generated bindings for this member).
        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 3 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
18167
18168 impl BufferCollectionAttachLifetimeTrackingRequest {
18169 #[inline(always)]
18170 fn max_ordinal_present(&self) -> u64 {
18171 if let Some(_) = self.buffers_remaining {
18172 return 2;
18173 }
18174 if let Some(_) = self.server_end {
18175 return 1;
18176 }
18177 0
18178 }
18179 }
18180
// Resource-type marker: this table contains a handle (`server_end`), so it
// is borrowed mutably for encoding, letting `encode` move handles out of
// the value via `take_or_borrow`.
impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        // The whole struct is its own borrowed form; member handles are
        // taken individually during encoding.
        value
    }
}
18189
// Inline layout of the table: 16 bytes (the vector header that `encode`
// writes — member count at +0, ALLOC_PRESENT_U64 at +8), 8-byte aligned.
// Member payloads live out-of-line in envelopes.
unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
18203
// Wire-format encoding for the `BufferCollectionAttachLifetimeTrackingRequest`
// FIDL table: writes the 16-byte vector header inline, then one 8-byte
// envelope per ordinal up to the highest set member
// (1 = server_end eventpair handle, 2 = buffers_remaining u32).
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionAttachLifetimeTrackingRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionAttachLifetimeTrackingRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionAttachLifetimeTrackingRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 1: server_end — the handle is moved out of `self` via
        // take_or_borrow, so encoding consumes it.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.server_end.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 2: buffers_remaining — a plain value type, borrowed.
        fidl::encoding::encode_in_envelope_optional::<
            u32,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffers_remaining
                .as_ref()
                .map(<u32 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
18302
// Wire-format decoding for the `BufferCollectionAttachLifetimeTrackingRequest`
// FIDL table: vector header, then one 8-byte envelope per ordinal
// (1 = server_end eventpair handle, 2 = buffers_remaining u32). Unknown
// ordinals are skipped for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachLifetimeTrackingRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: server_end (EVENTPAIR handle).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        // Ordinal 2: buffers_remaining (u32).
        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u32 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffers_remaining.get_or_insert_with(|| {
                fidl::new_empty!(u32, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u32,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
18449
18450 impl BufferCollectionAttachTokenRequest {
18451 #[inline(always)]
18452 fn max_ordinal_present(&self) -> u64 {
18453 if let Some(_) = self.token_request {
18454 return 2;
18455 }
18456 if let Some(_) = self.rights_attenuation_mask {
18457 return 1;
18458 }
18459 0
18460 }
18461 }
18462
// Resource-type marker: this table contains a channel endpoint
// (`token_request`), so it is borrowed mutably for encoding, letting
// `encode` move the endpoint out of the value via `take_or_borrow`.
impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachTokenRequest {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        // The whole struct is its own borrowed form; member resources are
        // taken individually during encoding.
        value
    }
}
18471
// Inline layout of the table: 16 bytes (the vector header that `encode`
// writes — member count at +0, ALLOC_PRESENT_U64 at +8), 8-byte aligned.
// Member payloads live out-of-line in envelopes.
unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachTokenRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
18485
// Wire-format encoding for the `BufferCollectionAttachTokenRequest` FIDL
// table: writes the 16-byte vector header inline, then one 8-byte envelope
// per ordinal up to the highest set member
// (1 = rights_attenuation_mask, 2 = token_request server endpoint).
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionAttachTokenRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionAttachTokenRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionAttachTokenRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 1: rights_attenuation_mask — a plain value type, borrowed.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::Rights,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.rights_attenuation_mask
                .as_ref()
                .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 2: token_request — the endpoint is moved out of `self`
        // via take_or_borrow, so encoding consumes it.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
18578
// Wire-format decoding for the `BufferCollectionAttachTokenRequest` FIDL
// table: vector header, then one 8-byte envelope per ordinal
// (1 = rights_attenuation_mask, 2 = token_request server endpoint).
// Unknown ordinals are skipped for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachTokenRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: rights_attenuation_mask (fidl::Rights bitflags).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        // Ordinal 2: token_request (server endpoint of a
        // BufferCollectionToken channel).
        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
18738
18739 impl BufferCollectionInfo {
18740 #[inline(always)]
18741 fn max_ordinal_present(&self) -> u64 {
18742 if let Some(_) = self.buffer_collection_id {
18743 return 3;
18744 }
18745 if let Some(_) = self.buffers {
18746 return 2;
18747 }
18748 if let Some(_) = self.settings {
18749 return 1;
18750 }
18751 0
18752 }
18753 }
18754
// Resource-type marker: this table contains resources (the `buffers`
// vector of VmoBuffer), so it is borrowed mutably for encoding, letting
// `encode` move them out of the value via `take_or_borrow`.
impl fidl::encoding::ResourceTypeMarker for BufferCollectionInfo {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        // The whole struct is its own borrowed form; member resources are
        // taken individually during encoding.
        value
    }
}
18763
// Inline layout of the table: 16 bytes (the vector header that `encode`
// writes — member count at +0, ALLOC_PRESENT_U64 at +8), 8-byte aligned.
// Member payloads live out-of-line in envelopes.
unsafe impl fidl::encoding::TypeMarker for BufferCollectionInfo {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
18777
// Wire-format encoding for the `BufferCollectionInfo` FIDL table: writes
// the 16-byte vector header inline, then one 8-byte envelope per ordinal
// up to the highest set member (1 = settings, 2 = buffers vector of up to
// 128 VmoBuffer, 3 = buffer_collection_id u64).
unsafe impl
    fidl::encoding::Encode<BufferCollectionInfo, fidl::encoding::DefaultFuchsiaResourceDialect>
    for &mut BufferCollectionInfo
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionInfo>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 1: settings — a plain value type, borrowed.
        fidl::encoding::encode_in_envelope_optional::<
            SingleBufferSettings,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.settings
                .as_ref()
                .map(<SingleBufferSettings as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 2: buffers — resource vector, moved out of `self` via
        // take_or_borrow, so encoding consumes the contained VMOs.
        fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.buffers.as_mut().map(<fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 3 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (3 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 3: buffer_collection_id — a plain value type, borrowed.
        fidl::encoding::encode_in_envelope_optional::<
            u64,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffer_collection_id
                .as_ref()
                .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
18887
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionInfo
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    /// Decodes a `BufferCollectionInfo` FIDL table in place.
    ///
    /// A table is encoded as a 16-byte vector header followed by one 8-byte
    /// envelope per ordinal. Known ordinals decoded here:
    ///   1 => `settings` (`SingleBufferSettings`)
    ///   2 => `buffers` (`Vector<VmoBuffer, 128>`)
    ///   3 => `buffer_collection_id` (`u64`)
    /// Envelopes for ordinals not listed (gaps or trailing) are consumed as
    /// unknown envelopes so newer peers' fields are tolerated.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // The header's count is the highest ordinal present; absent header is an error.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `settings`. Snapshot out-of-line cursor and handle count so
        // the envelope's declared sizes can be validated after decoding.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <SingleBufferSettings as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context,
                );
            // Values of <= 4 bytes are stored inline in the envelope; the header's
            // inline bit must agree with the member's size.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.settings.get_or_insert_with(|| {
                fidl::new_empty!(
                    SingleBufferSettings,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                SingleBufferSettings,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Cross-check the envelope's declared byte/handle counts against what
            // decoding actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 2: `buffers` (bounded vector, max 128 elements).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.buffers.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 3 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 3: `buffer_collection_id`.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
                fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
19085
19086 impl BufferCollectionSetConstraintsRequest {
19087 #[inline(always)]
19088 fn max_ordinal_present(&self) -> u64 {
19089 if let Some(_) = self.constraints {
19090 return 1;
19091 }
19092 0
19093 }
19094 }
19095
impl fidl::encoding::ResourceTypeMarker for BufferCollectionSetConstraintsRequest {
    type Borrowed<'a> = &'a mut Self;
    // Resource types are encoded through a mutable borrow so the encoder can
    // take ownership of any handles; no copy of the value is made here.
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
19104
unsafe impl fidl::encoding::TypeMarker for BufferCollectionSetConstraintsRequest {
    type Owned = Self;

    // 8-byte alignment, matching the table's vector-header layout.
    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    // Inline portion is the 16-byte header (count + presence word); the
    // envelopes live out of line.
    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
19118
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionSetConstraintsRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionSetConstraintsRequest
{
    /// Encodes this table: a vector header whose count is the highest present
    /// ordinal, followed by one 8-byte envelope per ordinal. Only ordinal 1
    /// (`constraints`) exists for this table.
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionSetConstraintsRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // `constraints` is a value type, so it is borrowed (not taken) for encoding.
        fidl::encoding::encode_in_envelope_optional::<
            BufferCollectionConstraints,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.constraints
                .as_ref()
                .map(<BufferCollectionConstraints as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
19181
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionSetConstraintsRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    /// Decodes this table in place. Known ordinal:
    ///   1 => `constraints` (`BufferCollectionConstraints`)
    /// Any higher ordinals from newer peers are consumed as unknown envelopes.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `constraints`. Snapshot cursor/handle counts to validate
        // the envelope's declared sizes afterwards.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <BufferCollectionConstraints as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context,
                );
            // Envelope inline bit must agree with the member's inline size (<= 4 bytes).
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.constraints.get_or_insert_with(|| {
                fidl::new_empty!(
                    BufferCollectionConstraints,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                BufferCollectionConstraints,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
19284
19285 impl BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
19286 #[inline(always)]
19287 fn max_ordinal_present(&self) -> u64 {
19288 if let Some(_) = self.group_request {
19289 return 1;
19290 }
19291 0
19292 }
19293 }
19294
impl fidl::encoding::ResourceTypeMarker
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
    type Borrowed<'a> = &'a mut Self;
    // Resource types are encoded through a mutable borrow so the encoder can
    // take ownership of any handles; no copy of the value is made here.
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
19305
unsafe impl fidl::encoding::TypeMarker
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
    type Owned = Self;

    // 8-byte alignment, matching the table's vector-header layout.
    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    // Inline portion is the 16-byte header (count + presence word); the
    // envelopes live out of line.
    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
19321
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
    /// Encodes this table: a vector header whose count is the highest present
    /// ordinal, followed by one 8-byte envelope per ordinal. Only ordinal 1
    /// (`group_request`, a server endpoint) exists for this table.
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder
            .debug_check_bounds::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
                offset,
            );
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Endpoints are resource types: `take_or_borrow` moves the handle out.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.group_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
19391
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    /// Decodes this table in place. Known ordinal:
    ///   1 => `group_request` (server endpoint; consumes one handle)
    /// Any higher ordinals from newer peers are consumed as unknown envelopes.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `group_request`. Snapshot cursor/handle counts to validate
        // the envelope's declared sizes afterwards.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Envelope inline bit must agree with the member's inline size (<= 4 bytes).
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.group_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
19499
19500 impl BufferCollectionTokenDuplicateRequest {
19501 #[inline(always)]
19502 fn max_ordinal_present(&self) -> u64 {
19503 if let Some(_) = self.token_request {
19504 return 2;
19505 }
19506 if let Some(_) = self.rights_attenuation_mask {
19507 return 1;
19508 }
19509 0
19510 }
19511 }
19512
impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateRequest {
    type Borrowed<'a> = &'a mut Self;
    // Resource types are encoded through a mutable borrow so the encoder can
    // take ownership of any handles; no copy of the value is made here.
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
19521
unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateRequest {
    type Owned = Self;

    // 8-byte alignment, matching the table's vector-header layout.
    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    // Inline portion is the 16-byte header (count + presence word); the
    // envelopes live out of line.
    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
19535
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenDuplicateRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenDuplicateRequest
{
    /// Encodes this table: a vector header whose count is the highest present
    /// ordinal, followed by one 8-byte envelope per ordinal. Ordinals:
    ///   1 => `rights_attenuation_mask` (`fidl::Rights`, value type)
    ///   2 => `token_request` (server endpoint, resource type)
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionTokenDuplicateRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `rights_attenuation_mask` — value type, encoded from a borrow.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::Rights,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.rights_attenuation_mask
                .as_ref()
                .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 2: `token_request` — resource type, handle is moved out.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
19628
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    /// Decodes this table in place. Known ordinals:
    ///   1 => `rights_attenuation_mask` (`fidl::Rights`)
    ///   2 => `token_request` (server endpoint; consumes one handle)
    /// Any higher ordinals from newer peers are consumed as unknown envelopes.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `rights_attenuation_mask`. Snapshot cursor/handle counts to
        // validate the envelope's declared sizes afterwards.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            // Envelope inline bit must agree with the member's inline size (<= 4 bytes).
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 2: `token_request` (endpoint; its decode claims a handle).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
19788
19789 impl BufferCollectionTokenGroupCreateChildRequest {
19790 #[inline(always)]
19791 fn max_ordinal_present(&self) -> u64 {
19792 if let Some(_) = self.rights_attenuation_mask {
19793 return 2;
19794 }
19795 if let Some(_) = self.token_request {
19796 return 1;
19797 }
19798 0
19799 }
19800 }
19801
impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildRequest {
    type Borrowed<'a> = &'a mut Self;
    // Resource types are encoded through a mutable borrow so the encoder can
    // take ownership of any handles; no copy of the value is made here.
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
19810
unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildRequest {
    type Owned = Self;

    // 8-byte alignment, matching the table's vector-header layout.
    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    // Inline portion is the 16-byte header (count + presence word); the
    // envelopes live out of line.
    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
19824
// Encodes `BufferCollectionTokenGroupCreateChildRequest` as a FIDL table:
// a 16-byte vector header (max present ordinal + presence marker) followed by
// one 8-byte out-of-line envelope per ordinal. Implemented on `&mut` so that
// handle fields can be moved out of the struct while encoding.
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenGroupCreateChildRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenGroupCreateChildRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionTokenGroupCreateChildRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        // Fields past max_ordinal are absent; stop once the last present one is written.
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 1: token_request (server-end channel; moved out of self).
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 2: rights_attenuation_mask (plain value; borrowed, not moved).
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::Rights,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.rights_attenuation_mask
                .as_ref()
                .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
19917
// Decodes a FIDL table into `BufferCollectionTokenGroupCreateChildRequest`:
// reads the vector header, then walks the envelope list in ordinal order,
// skipping unknown ordinals and verifying each envelope's declared byte and
// handle counts against what was actually consumed.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // Tables are encoded as a non-nullable vector of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: token_request.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of <= 4 bytes are stored inline in the envelope itself.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope's declared sizes match what decoding consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 2: rights_attenuation_mask.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20077
20078 impl BufferCollectionTokenGroupCreateChildrenSyncResponse {
20079 #[inline(always)]
20080 fn max_ordinal_present(&self) -> u64 {
20081 if let Some(_) = self.tokens {
20082 return 1;
20083 }
20084 0
20085 }
20086 }
20087
20088 impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
20089 type Borrowed<'a> = &'a mut Self;
20090 fn take_or_borrow<'a>(
20091 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20092 ) -> Self::Borrowed<'a> {
20093 value
20094 }
20095 }
20096
20097 unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
20098 type Owned = Self;
20099
20100 #[inline(always)]
20101 fn inline_align(_context: fidl::encoding::Context) -> usize {
20102 8
20103 }
20104
20105 #[inline(always)]
20106 fn inline_size(_context: fidl::encoding::Context) -> usize {
20107 16
20108 }
20109 }
20110
// Encodes `BufferCollectionTokenGroupCreateChildrenSyncResponse` as a FIDL
// table: a 16-byte vector header followed by one 8-byte envelope per ordinal.
// Implemented on `&mut` so the handle-bearing `tokens` vector can be moved out.
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenGroupCreateChildrenSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenGroupCreateChildrenSyncResponse
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder
            .debug_check_bounds::<BufferCollectionTokenGroupCreateChildrenSyncResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 1: tokens — bounded vector (max 64) of client-end channels,
        // moved out of self.
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Vector<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                64,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.tokens.as_mut().map(
                <fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20184
// Decodes a FIDL table into `BufferCollectionTokenGroupCreateChildrenSyncResponse`:
// reads the vector header, then walks the envelope list in ordinal order,
// skipping unknown ordinals and verifying per-envelope byte/handle counts.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildrenSyncResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // Tables are encoded as a non-nullable vector of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: tokens (bounded vector of client-end channels, max 64).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Vector<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                64,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of <= 4 bytes are stored inline in the envelope itself.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.tokens.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope's declared sizes match what decoding consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20301
20302 impl BufferCollectionTokenDuplicateSyncResponse {
20303 #[inline(always)]
20304 fn max_ordinal_present(&self) -> u64 {
20305 if let Some(_) = self.tokens {
20306 return 1;
20307 }
20308 0
20309 }
20310 }
20311
20312 impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateSyncResponse {
20313 type Borrowed<'a> = &'a mut Self;
20314 fn take_or_borrow<'a>(
20315 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20316 ) -> Self::Borrowed<'a> {
20317 value
20318 }
20319 }
20320
20321 unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateSyncResponse {
20322 type Owned = Self;
20323
20324 #[inline(always)]
20325 fn inline_align(_context: fidl::encoding::Context) -> usize {
20326 8
20327 }
20328
20329 #[inline(always)]
20330 fn inline_size(_context: fidl::encoding::Context) -> usize {
20331 16
20332 }
20333 }
20334
// Encodes `BufferCollectionTokenDuplicateSyncResponse` as a FIDL table:
// a 16-byte vector header followed by one 8-byte envelope per ordinal.
// Implemented on `&mut` so the handle-bearing `tokens` vector can be moved out.
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenDuplicateSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenDuplicateSyncResponse
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionTokenDuplicateSyncResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 1: tokens — bounded vector (max 64) of client-end channels,
        // moved out of self.
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Vector<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                64,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.tokens.as_mut().map(
                <fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20407
// Decodes a FIDL table into `BufferCollectionTokenDuplicateSyncResponse`:
// reads the vector header, then walks the envelope list in ordinal order,
// skipping unknown ordinals and verifying per-envelope byte/handle counts.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateSyncResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // Tables are encoded as a non-nullable vector of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: tokens (bounded vector of client-end channels, max 64).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Vector<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                64,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of <= 4 bytes are stored inline in the envelope itself.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.tokens.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope's declared sizes match what decoding consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20524
20525 impl BufferCollectionWaitForAllBuffersAllocatedResponse {
20526 #[inline(always)]
20527 fn max_ordinal_present(&self) -> u64 {
20528 if let Some(_) = self.buffer_collection_info {
20529 return 1;
20530 }
20531 0
20532 }
20533 }
20534
20535 impl fidl::encoding::ResourceTypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
20536 type Borrowed<'a> = &'a mut Self;
20537 fn take_or_borrow<'a>(
20538 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20539 ) -> Self::Borrowed<'a> {
20540 value
20541 }
20542 }
20543
20544 unsafe impl fidl::encoding::TypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
20545 type Owned = Self;
20546
20547 #[inline(always)]
20548 fn inline_align(_context: fidl::encoding::Context) -> usize {
20549 8
20550 }
20551
20552 #[inline(always)]
20553 fn inline_size(_context: fidl::encoding::Context) -> usize {
20554 16
20555 }
20556 }
20557
// Encodes `BufferCollectionWaitForAllBuffersAllocatedResponse` as a FIDL table:
// a 16-byte vector header followed by one 8-byte envelope per ordinal.
// Implemented on `&mut` so the resource-bearing `buffer_collection_info`
// payload can be moved out during encoding.
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionWaitForAllBuffersAllocatedResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionWaitForAllBuffersAllocatedResponse
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder
            .debug_check_bounds::<BufferCollectionWaitForAllBuffersAllocatedResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 1: buffer_collection_info (resource table; moved out of self).
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            BufferCollectionInfo,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffer_collection_info.as_mut().map(
                <BufferCollectionInfo as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20621
// Decodes a FIDL table into `BufferCollectionWaitForAllBuffersAllocatedResponse`:
// reads the vector header, then walks the envelope list in ordinal order,
// skipping unknown ordinals and verifying per-envelope byte/handle counts.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionWaitForAllBuffersAllocatedResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // Tables are encoded as a non-nullable vector of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: buffer_collection_info.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <BufferCollectionInfo as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context,
                );
            // Payloads of <= 4 bytes are stored inline in the envelope itself.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_collection_info.get_or_insert_with(|| {
                fidl::new_empty!(
                    BufferCollectionInfo,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                BufferCollectionInfo,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope's declared sizes match what decoding consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20724
20725 impl NodeAttachNodeTrackingRequest {
20726 #[inline(always)]
20727 fn max_ordinal_present(&self) -> u64 {
20728 if let Some(_) = self.server_end {
20729 return 1;
20730 }
20731 0
20732 }
20733 }
20734
20735 impl fidl::encoding::ResourceTypeMarker for NodeAttachNodeTrackingRequest {
20736 type Borrowed<'a> = &'a mut Self;
20737 fn take_or_borrow<'a>(
20738 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
20739 ) -> Self::Borrowed<'a> {
20740 value
20741 }
20742 }
20743
20744 unsafe impl fidl::encoding::TypeMarker for NodeAttachNodeTrackingRequest {
20745 type Owned = Self;
20746
20747 #[inline(always)]
20748 fn inline_align(_context: fidl::encoding::Context) -> usize {
20749 8
20750 }
20751
20752 #[inline(always)]
20753 fn inline_size(_context: fidl::encoding::Context) -> usize {
20754 16
20755 }
20756 }
20757
// Encodes `NodeAttachNodeTrackingRequest` as a FIDL table: a 16-byte vector
// header followed by one 8-byte envelope per ordinal. Implemented on `&mut`
// so the `server_end` handle can be moved out during encoding.
unsafe impl
    fidl::encoding::Encode<
        NodeAttachNodeTrackingRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut NodeAttachNodeTrackingRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<NodeAttachNodeTrackingRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 1: server_end — a zx eventpair handle (despite the name, the
        // type here is EventPair, not a channel server end); moved out of self.
        // The rights constant 2147483648 is 0x80000000 — presumably
        // ZX_RIGHT_SAME_RIGHTS; TODO confirm against zircon rights definitions.
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.server_end.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20828
/// Decodes a `NodeAttachNodeTrackingRequest` FIDL table from the wire format:
/// a 16-byte vector header (count = highest ordinal present, plus a presence
/// marker) followed out-of-line by one 8-byte envelope per ordinal.
/// Envelopes for ordinals this binding does not know are skipped so newer
/// peers stay compatible.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeAttachNodeTrackingRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        // Every table field is optional, so the empty value is the default.
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // `len` is the envelope count, i.e. the highest ordinal encoded.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `server_end` (eventpair handle).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot out-of-line cursor and handle count so the envelope's
        // declared sizes can be validated after the member is decoded.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            // NOTE(review): 2147483648 == 0x8000_0000, presumably the
            // "same rights" handle-rights sentinel — confirm against the fidl
            // crate's rights constants.
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Members of <= 4 bytes are stored inline in the envelope; the
            // header's inline bit must agree with the member's size.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            // Cross-check the envelope's declared byte/handle counts against
            // what decoding actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20923
20924 impl NodeIsAlternateForRequest {
20925 #[inline(always)]
20926 fn max_ordinal_present(&self) -> u64 {
20927 if let Some(_) = self.node_ref {
20928 return 1;
20929 }
20930 0
20931 }
20932 }
20933
/// Resource (handle-carrying) marker: encoding borrows the table mutably so
/// that contained handles can be moved out rather than duplicated.
impl fidl::encoding::ResourceTypeMarker for NodeIsAlternateForRequest {
    type Borrowed<'a> = &'a mut Self;
    // For a whole table the "take or borrow" is just the mutable borrow;
    // individual handle members are taken during encoding.
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
20942
unsafe impl fidl::encoding::TypeMarker for NodeIsAlternateForRequest {
    type Owned = Self;

    // Inline representation of a FIDL table is a vector header:
    // 8-byte count + 8-byte presence marker, 8-byte aligned (16 bytes total).
    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
20956
/// Encodes `NodeIsAlternateForRequest` (a FIDL table): writes the 16-byte
/// vector header inline, then one 8-byte envelope per ordinal up to the
/// highest ordinal that is present.
unsafe impl
    fidl::encoding::Encode<
        NodeIsAlternateForRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut NodeIsAlternateForRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<NodeIsAlternateForRequest>(offset);
        // Vector header: count = highest ordinal present, then presence marker.
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `node_ref` (event handle). `take_or_borrow` moves the
        // handle out of the table so encoding consumes it.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::Event,
                { fidl::ObjectType::EVENT.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.node_ref.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
21027
/// Decodes a `NodeIsAlternateForRequest` FIDL table: a 16-byte vector header
/// (count = highest ordinal present) followed out-of-line by one 8-byte
/// envelope per ordinal. Unknown ordinals are skipped for forward
/// compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeIsAlternateForRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        // Every table field is optional, so the empty value is the default.
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // `len` is the envelope count, i.e. the highest ordinal encoded.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `node_ref` (event handle).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot cursors to validate the envelope's declared sizes below.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::Event,
                { fidl::ObjectType::EVENT.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Members of <= 4 bytes are stored inline in the envelope; the
            // header's inline bit must agree with the member's size.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            // Cross-check declared byte/handle counts against actual use.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
21122
21123 impl NodeSetWeakOkRequest {
21124 #[inline(always)]
21125 fn max_ordinal_present(&self) -> u64 {
21126 if let Some(_) = self.for_child_nodes_also {
21127 return 1;
21128 }
21129 0
21130 }
21131 }
21132
/// Resource (handle-carrying) marker: encoding borrows the table mutably so
/// that contained handles can be moved out rather than duplicated.
impl fidl::encoding::ResourceTypeMarker for NodeSetWeakOkRequest {
    type Borrowed<'a> = &'a mut Self;
    // For a whole table the "take or borrow" is just the mutable borrow.
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
21141
unsafe impl fidl::encoding::TypeMarker for NodeSetWeakOkRequest {
    type Owned = Self;

    // Inline representation of a FIDL table is a vector header:
    // 8-byte count + 8-byte presence marker, 8-byte aligned (16 bytes total).
    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
21155
/// Encodes `NodeSetWeakOkRequest` (a FIDL table): writes the 16-byte vector
/// header inline, then one 8-byte envelope per ordinal up to the highest
/// ordinal that is present.
unsafe impl
    fidl::encoding::Encode<NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect>
    for &mut NodeSetWeakOkRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<NodeSetWeakOkRequest>(offset);
        // Vector header: count = highest ordinal present, then presence marker.
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `for_child_nodes_also`. A bool is a value type, so it is
        // borrowed (not taken) for encoding.
        fidl::encoding::encode_in_envelope_optional::<
            bool,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.for_child_nodes_also
                .as_ref()
                .map(<bool as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
21216
/// Decodes a `NodeSetWeakOkRequest` FIDL table: a 16-byte vector header
/// (count = highest ordinal present) followed out-of-line by one 8-byte
/// envelope per ordinal. Unknown ordinals are skipped for forward
/// compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeSetWeakOkRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        // Every table field is optional, so the empty value is the default.
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // `len` is the envelope count, i.e. the highest ordinal encoded.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `for_child_nodes_also` (bool).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot cursors to validate the envelope's declared sizes below.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <bool as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            // Members of <= 4 bytes (bool is 1) are stored inline in the
            // envelope; the header's inline bit must agree.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.for_child_nodes_also.get_or_insert_with(|| {
                fidl::new_empty!(bool, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                bool,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Cross-check declared byte/handle counts against actual use.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
21314
21315 impl NodeGetNodeRefResponse {
21316 #[inline(always)]
21317 fn max_ordinal_present(&self) -> u64 {
21318 if let Some(_) = self.node_ref {
21319 return 1;
21320 }
21321 0
21322 }
21323 }
21324
/// Resource (handle-carrying) marker: encoding borrows the table mutably so
/// that contained handles can be moved out rather than duplicated.
impl fidl::encoding::ResourceTypeMarker for NodeGetNodeRefResponse {
    type Borrowed<'a> = &'a mut Self;
    // For a whole table the "take or borrow" is just the mutable borrow.
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
21333
unsafe impl fidl::encoding::TypeMarker for NodeGetNodeRefResponse {
    type Owned = Self;

    // Inline representation of a FIDL table is a vector header:
    // 8-byte count + 8-byte presence marker, 8-byte aligned (16 bytes total).
    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
21347
/// Encodes `NodeGetNodeRefResponse` (a FIDL table): writes the 16-byte vector
/// header inline, then one 8-byte envelope per ordinal up to the highest
/// ordinal that is present.
unsafe impl
    fidl::encoding::Encode<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut NodeGetNodeRefResponse
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<NodeGetNodeRefResponse>(offset);
        // Vector header: count = highest ordinal present, then presence marker.
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `node_ref` (event handle). `take_or_borrow` moves the
        // handle out of the table so encoding consumes it.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::Event,
                { fidl::ObjectType::EVENT.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.node_ref.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
21418
/// Decodes a `NodeGetNodeRefResponse` FIDL table: a 16-byte vector header
/// (count = highest ordinal present) followed out-of-line by one 8-byte
/// envelope per ordinal. Unknown ordinals are skipped for forward
/// compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeGetNodeRefResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        // Every table field is optional, so the empty value is the default.
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // `len` is the envelope count, i.e. the highest ordinal encoded.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `node_ref` (event handle).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot cursors to validate the envelope's declared sizes below.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::Event,
                { fidl::ObjectType::EVENT.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Members of <= 4 bytes are stored inline in the envelope; the
            // header's inline bit must agree with the member's size.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            // Cross-check declared byte/handle counts against actual use.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
21513
21514 impl VmoBuffer {
21515 #[inline(always)]
21516 fn max_ordinal_present(&self) -> u64 {
21517 if let Some(_) = self.close_weak_asap {
21518 return 3;
21519 }
21520 if let Some(_) = self.vmo_usable_start {
21521 return 2;
21522 }
21523 if let Some(_) = self.vmo {
21524 return 1;
21525 }
21526 0
21527 }
21528 }
21529
/// Resource (handle-carrying) marker: encoding borrows the table mutably so
/// that contained handles (vmo, close_weak_asap) can be moved out rather
/// than duplicated.
impl fidl::encoding::ResourceTypeMarker for VmoBuffer {
    type Borrowed<'a> = &'a mut Self;
    // For a whole table the "take or borrow" is just the mutable borrow.
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
21538
unsafe impl fidl::encoding::TypeMarker for VmoBuffer {
    type Owned = Self;

    // Inline representation of a FIDL table is a vector header:
    // 8-byte count + 8-byte presence marker, 8-byte aligned (16 bytes total).
    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
21552
/// Encodes `VmoBuffer` (a FIDL table): writes the 16-byte vector header
/// inline, then one 8-byte envelope per ordinal up to the highest ordinal
/// present. Ordinals: 1 = `vmo` (VMO handle), 2 = `vmo_usable_start` (u64),
/// 3 = `close_weak_asap` (eventpair handle). Encoding stops early after the
/// last present ordinal.
unsafe impl fidl::encoding::Encode<VmoBuffer, fidl::encoding::DefaultFuchsiaResourceDialect>
    for &mut VmoBuffer
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<VmoBuffer>(offset);
        // Vector header: count = highest ordinal present, then presence marker.
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 1: `vmo`. `take_or_borrow` moves the handle out so encoding
        // consumes it.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::Vmo,
                { fidl::ObjectType::VMO.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.vmo.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 2: `vmo_usable_start`. A u64 is a value type, so it is
        // borrowed (not taken) for encoding.
        fidl::encoding::encode_in_envelope_optional::<
            u64,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.vmo_usable_start
                .as_ref()
                .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 3 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (3 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        // Ordinal 3: `close_weak_asap` (eventpair handle), moved out via
        // `take_or_borrow`.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.close_weak_asap.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
21684
21685 impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {
21686 #[inline(always)]
21687 fn new_empty() -> Self {
21688 Self::default()
21689 }
21690
21691 unsafe fn decode(
21692 &mut self,
21693 decoder: &mut fidl::encoding::Decoder<
21694 '_,
21695 fidl::encoding::DefaultFuchsiaResourceDialect,
21696 >,
21697 offset: usize,
21698 mut depth: fidl::encoding::Depth,
21699 ) -> fidl::Result<()> {
21700 decoder.debug_check_bounds::<Self>(offset);
21701 let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
21702 None => return Err(fidl::Error::NotNullable),
21703 Some(len) => len,
21704 };
21705 // Calling decoder.out_of_line_offset(0) is not allowed.
21706 if len == 0 {
21707 return Ok(());
21708 };
21709 depth.increment()?;
21710 let envelope_size = 8;
21711 let bytes_len = len * envelope_size;
21712 let offset = decoder.out_of_line_offset(bytes_len)?;
21713 // Decode the envelope for each type.
21714 let mut _next_ordinal_to_read = 0;
21715 let mut next_offset = offset;
21716 let end_offset = offset + bytes_len;
21717 _next_ordinal_to_read += 1;
21718 if next_offset >= end_offset {
21719 return Ok(());
21720 }
21721
21722 // Decode unknown envelopes for gaps in ordinals.
21723 while _next_ordinal_to_read < 1 {
21724 fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
21725 _next_ordinal_to_read += 1;
21726 next_offset += envelope_size;
21727 }
21728
21729 let next_out_of_line = decoder.next_out_of_line();
21730 let handles_before = decoder.remaining_handles();
21731 if let Some((inlined, num_bytes, num_handles)) =
21732 fidl::encoding::decode_envelope_header(decoder, next_offset)?
21733 {
21734 let member_inline_size = <fidl::encoding::HandleType<
21735 fidl::Vmo,
21736 { fidl::ObjectType::VMO.into_raw() },
21737 2147483648,
21738 > as fidl::encoding::TypeMarker>::inline_size(
21739 decoder.context
21740 );
21741 if inlined != (member_inline_size <= 4) {
21742 return Err(fidl::Error::InvalidInlineBitInEnvelope);
21743 }
21744 let inner_offset;
21745 let mut inner_depth = depth.clone();
21746 if inlined {
21747 decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
21748 inner_offset = next_offset;
21749 } else {
21750 inner_offset = decoder.out_of_line_offset(member_inline_size)?;
21751 inner_depth.increment()?;
21752 }
21753 let val_ref =
21754 self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
21755 fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
21756 if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
21757 {
21758 return Err(fidl::Error::InvalidNumBytesInEnvelope);
21759 }
21760 if handles_before != decoder.remaining_handles() + (num_handles as usize) {
21761 return Err(fidl::Error::InvalidNumHandlesInEnvelope);
21762 }
21763 }
21764
21765 next_offset += envelope_size;
21766 _next_ordinal_to_read += 1;
21767 if next_offset >= end_offset {
21768 return Ok(());
21769 }
21770
21771 // Decode unknown envelopes for gaps in ordinals.
21772 while _next_ordinal_to_read < 2 {
21773 fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
21774 _next_ordinal_to_read += 1;
21775 next_offset += envelope_size;
21776 }
21777
21778 let next_out_of_line = decoder.next_out_of_line();
21779 let handles_before = decoder.remaining_handles();
21780 if let Some((inlined, num_bytes, num_handles)) =
21781 fidl::encoding::decode_envelope_header(decoder, next_offset)?
21782 {
21783 let member_inline_size =
21784 <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
21785 if inlined != (member_inline_size <= 4) {
21786 return Err(fidl::Error::InvalidInlineBitInEnvelope);
21787 }
21788 let inner_offset;
21789 let mut inner_depth = depth.clone();
21790 if inlined {
21791 decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
21792 inner_offset = next_offset;
21793 } else {
21794 inner_offset = decoder.out_of_line_offset(member_inline_size)?;
21795 inner_depth.increment()?;
21796 }
21797 let val_ref = self.vmo_usable_start.get_or_insert_with(|| {
21798 fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
21799 });
21800 fidl::decode!(
21801 u64,
21802 fidl::encoding::DefaultFuchsiaResourceDialect,
21803 val_ref,
21804 decoder,
21805 inner_offset,
21806 inner_depth
21807 )?;
21808 if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
21809 {
21810 return Err(fidl::Error::InvalidNumBytesInEnvelope);
21811 }
21812 if handles_before != decoder.remaining_handles() + (num_handles as usize) {
21813 return Err(fidl::Error::InvalidNumHandlesInEnvelope);
21814 }
21815 }
21816
21817 next_offset += envelope_size;
21818 _next_ordinal_to_read += 1;
21819 if next_offset >= end_offset {
21820 return Ok(());
21821 }
21822
21823 // Decode unknown envelopes for gaps in ordinals.
21824 while _next_ordinal_to_read < 3 {
21825 fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
21826 _next_ordinal_to_read += 1;
21827 next_offset += envelope_size;
21828 }
21829
21830 let next_out_of_line = decoder.next_out_of_line();
21831 let handles_before = decoder.remaining_handles();
21832 if let Some((inlined, num_bytes, num_handles)) =
21833 fidl::encoding::decode_envelope_header(decoder, next_offset)?
21834 {
21835 let member_inline_size = <fidl::encoding::HandleType<
21836 fidl::EventPair,
21837 { fidl::ObjectType::EVENTPAIR.into_raw() },
21838 2147483648,
21839 > as fidl::encoding::TypeMarker>::inline_size(
21840 decoder.context
21841 );
21842 if inlined != (member_inline_size <= 4) {
21843 return Err(fidl::Error::InvalidInlineBitInEnvelope);
21844 }
21845 let inner_offset;
21846 let mut inner_depth = depth.clone();
21847 if inlined {
21848 decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
21849 inner_offset = next_offset;
21850 } else {
21851 inner_offset = decoder.out_of_line_offset(member_inline_size)?;
21852 inner_depth.increment()?;
21853 }
21854 let val_ref =
21855 self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
21856 fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
21857 if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
21858 {
21859 return Err(fidl::Error::InvalidNumBytesInEnvelope);
21860 }
21861 if handles_before != decoder.remaining_handles() + (num_handles as usize) {
21862 return Err(fidl::Error::InvalidNumHandlesInEnvelope);
21863 }
21864 }
21865
21866 next_offset += envelope_size;
21867
21868 // Decode the remaining unknown envelopes.
21869 while next_offset < end_offset {
21870 _next_ordinal_to_read += 1;
21871 fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
21872 next_offset += envelope_size;
21873 }
21874
21875 Ok(())
21876 }
21877 }
21878}